Comparison with SQL¶
Since many potential pandas users have some familiarity with SQL, this page is meant to provide some examples of how various SQL operations would be performed using pandas.
If you’re new to pandas, you might want to first read through 10 Minutes to pandas to familiarize yourself with the library.
As is customary, we import pandas and NumPy as follows:
In [1]: import pandas as pd
In [2]: import numpy as np
Most of the examples will utilize the tips
dataset found within pandas tests. We’ll read
the data into a DataFrame called tips and assume we have a database table of the same name and
structure.
In [3]: url = ('https://raw.github.com/pandas-dev'
...: '/pandas/master/pandas/tests/io/data/csv/tips.csv')
...:
In [4]: tips = pd.read_csv(url)
In [5]: tips.head()
Out[5]:
   total_bill   tip     sex smoker  day    time  size
0       16.99  1.01  Female     No  Sun  Dinner     2
1       10.34  1.66    Male     No  Sun  Dinner     3
2       21.01  3.50    Male     No  Sun  Dinner     3
3       23.68  3.31    Male     No  Sun  Dinner     2
4       24.59  3.61  Female     No  Sun  Dinner     4
SELECT¶
In SQL, selection is done using a comma-separated list of columns you’d like to select (or a *
to select all columns):
SELECT total_bill, tip, smoker, time
FROM tips
LIMIT 5;
With pandas, column selection is done by passing a list of column names to your DataFrame:
In [6]: tips[['total_bill', 'tip', 'smoker', 'time']].head(5)
Out[6]:
   total_bill   tip smoker    time
0       16.99  1.01     No  Dinner
1       10.34  1.66     No  Dinner
2       21.01  3.50     No  Dinner
3       23.68  3.31     No  Dinner
4       24.59  3.61     No  Dinner
Calling the DataFrame without the list of column names would display all columns (akin to SQL’s
*
).
In SQL, you can add a calculated column:
SELECT *, tip/total_bill as tip_rate
FROM tips
LIMIT 5;
With pandas, you can use the DataFrame.assign()
method of a DataFrame to append a new column:
In [7]: tips.assign(tip_rate=tips['tip'] / tips['total_bill']).head(5)
Out[7]:
   total_bill   tip     sex smoker  day    time  size  tip_rate
0       16.99  1.01  Female     No  Sun  Dinner     2  0.059447
1       10.34  1.66    Male     No  Sun  Dinner     3  0.160542
2       21.01  3.50    Male     No  Sun  Dinner     3  0.166587
3       23.68  3.31    Male     No  Sun  Dinner     2  0.139780
4       24.59  3.61  Female     No  Sun  Dinner     4  0.146808
WHERE¶
Filtering in SQL is done via a WHERE clause.
SELECT *
FROM tips
WHERE time = 'Dinner'
LIMIT 5;
DataFrames can be filtered in multiple ways; the most intuitive of which is using boolean indexing
In [8]: tips[tips['time'] == 'Dinner'].head(5)
Out[8]:
   total_bill   tip     sex smoker  day    time  size
0       16.99  1.01  Female     No  Sun  Dinner     2
1       10.34  1.66    Male     No  Sun  Dinner     3
2       21.01  3.50    Male     No  Sun  Dinner     3
3       23.68  3.31    Male     No  Sun  Dinner     2
4       24.59  3.61  Female     No  Sun  Dinner     4
The above statement is simply passing a Series
of True/False objects to the DataFrame,
returning all rows with True.
In [9]: is_dinner = tips['time'] == 'Dinner'
In [10]: is_dinner.value_counts()
Out[10]:
True     176
False     68
Name: time, dtype: int64
In [11]: tips[is_dinner].head(5)
Out[11]:
   total_bill   tip     sex smoker  day    time  size
0       16.99  1.01  Female     No  Sun  Dinner     2
1       10.34  1.66    Male     No  Sun  Dinner     3
2       21.01  3.50    Male     No  Sun  Dinner     3
3       23.68  3.31    Male     No  Sun  Dinner     2
4       24.59  3.61  Female     No  Sun  Dinner     4
Just like SQL’s OR and AND, multiple conditions can be passed to a DataFrame using | (OR) and & (AND).
-- tips of more than $5.00 at Dinner meals
SELECT *
FROM tips
WHERE time = 'Dinner' AND tip > 5.00;
# tips of more than $5.00 at Dinner meals
In [12]: tips[(tips['time'] == 'Dinner') & (tips['tip'] > 5.00)]
-- tips by parties of at least 5 diners OR bill total was more than $45
SELECT *
FROM tips
WHERE size >= 5 OR total_bill > 45;
# tips by parties of at least 5 diners OR bill total was more than $45
In [13]: tips[(tips['size'] >= 5) | (tips['total_bill'] > 45)]
NULL checking is done using the notna()
and isna()
methods.
In [14]: frame = pd.DataFrame({'col1': ['A', 'B', np.NaN, 'C', 'D'],
....: 'col2': ['F', np.NaN, 'G', 'H', 'I']})
....:
In [15]: frame
Out[15]:
col1 col2
0 A F
1 B NaN
2 NaN G
3 C H
4 D I
Assume we have a table of the same structure as our DataFrame above. We can see only the records
where col2
IS NULL with the following query:
SELECT *
FROM frame
WHERE col2 IS NULL;
In [16]: frame[frame['col2'].isna()]
Out[16]:
col1 col2
1 B NaN
Getting items where col1
IS NOT NULL can be done with notna()
.
SELECT *
FROM frame
WHERE col1 IS NOT NULL;
In [17]: frame[frame['col1'].notna()]
Out[17]:
col1 col2
0 A F
1 B NaN
3 C H
4 D I
GROUP BY¶
In pandas, SQL’s GROUP BY operations are performed using the similarly named
groupby()
method. groupby()
typically refers to a
process where we’d like to split a dataset into groups, apply some function (typically aggregation)
, and then combine the groups together.
A common SQL operation would be getting the count of records in each group throughout a dataset. For instance, a query getting us the number of tips left by sex:
SELECT sex, count(*)
FROM tips
GROUP BY sex;
/*
Female 87
Male 157
*/
The pandas equivalent would be:
In [18]: tips.groupby('sex').size()
Out[18]:
sex
Female     87
Male      157
dtype: int64
Notice that in the pandas code we used size()
and not
count()
. This is because
count()
applies the function to each column, returning
the number of not null
records within each.
In [19]: tips.groupby('sex').count()
Out[19]:
        total_bill  tip  smoker  day  time  size
sex
Female          87   87      87   87    87    87
Male           157  157     157  157   157   157
Alternatively, we could have applied the count()
method
to an individual column:
In [20]: tips.groupby('sex')['total_bill'].count()
Out[20]:
sex
Female     87
Male      157
Name: total_bill, dtype: int64
Multiple functions can also be applied at once. For instance, say we’d like to see how tip amount
differs by day of the week - agg()
allows you to pass a dictionary
to your grouped DataFrame, indicating which functions to apply to specific columns.
SELECT day, AVG(tip), COUNT(*)
FROM tips
GROUP BY day;
/*
Fri 2.734737 19
Sat 2.993103 87
Sun 3.255132 76
Thur 2.771452 62
*/
In [21]: tips.groupby('day').agg({'tip': np.mean, 'day': np.size})
Out[21]:
           tip  day
day
Fri   2.734737   19
Sat   2.993103   87
Sun   3.255132   76
Thur  2.771452   62
Grouping by more than one column is done by passing a list of columns to the
groupby()
method.
SELECT smoker, day, COUNT(*), AVG(tip)
FROM tips
GROUP BY smoker, day;
/*
smoker day
No Fri 4 2.812500
Sat 45 3.102889
Sun 57 3.167895
Thur 45 2.673778
Yes Fri 15 2.714000
Sat 42 2.875476
Sun 19 3.516842
Thur 17 3.030000
*/
In [22]: tips.groupby(['smoker', 'day']).agg({'tip': [np.size, np.mean]})
Out[22]:
              tip
             size      mean
smoker day
No     Fri    4.0  2.812500
       Sat   45.0  3.102889
       Sun   57.0  3.167895
       Thur  45.0  2.673778
Yes    Fri   15.0  2.714000
       Sat   42.0  2.875476
       Sun   19.0  3.516842
       Thur  17.0  3.030000
JOIN¶
JOINs can be performed with join()
or merge()
. By default,
join()
will join the DataFrames on their indices. Each method has
parameters allowing you to specify the type of join to perform (LEFT, RIGHT, INNER, FULL) or the
columns to join on (column names or indices).
In [23]: df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'],
....: 'value': np.random.randn(4)})
....:
In [24]: df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'],
....: 'value': np.random.randn(4)})
....:
Assume we have two database tables of the same name and structure as our DataFrames.
Now let’s go over the various types of JOINs.
INNER JOIN¶
SELECT *
FROM df1
INNER JOIN df2
ON df1.key = df2.key;
# merge performs an INNER JOIN by default
In [25]: pd.merge(df1, df2, on='key')
Out[25]:
key value_x value_y
0 B -0.282863 1.212112
1 D -1.135632 -0.173215
2 D -1.135632 0.119209
merge()
also offers parameters for cases when you’d like to join one DataFrame’s
column with another DataFrame’s index.
In [26]: indexed_df2 = df2.set_index('key')
In [27]: pd.merge(df1, indexed_df2, left_on='key', right_index=True)
Out[27]:
key value_x value_y
1 B -0.282863 1.212112
3 D -1.135632 -0.173215
3 D -1.135632 0.119209
LEFT OUTER JOIN¶
-- show all records from df1
SELECT *
FROM df1
LEFT OUTER JOIN df2
ON df1.key = df2.key;
# show all records from df1
In [28]: pd.merge(df1, df2, on='key', how='left')
Out[28]:
key value_x value_y
0 A 0.469112 NaN
1 B -0.282863 1.212112
2 C -1.509059 NaN
3 D -1.135632 -0.173215
4 D -1.135632 0.119209
RIGHT JOIN¶
-- show all records from df2
SELECT *
FROM df1
RIGHT OUTER JOIN df2
ON df1.key = df2.key;
# show all records from df2
In [29]: pd.merge(df1, df2, on='key', how='right')
Out[29]:
key value_x value_y
0 B -0.282863 1.212112
1 D -1.135632 -0.173215
2 D -1.135632 0.119209
3 E NaN -1.044236
FULL JOIN¶
pandas also allows for FULL JOINs, which display both sides of the dataset, whether or not the joined columns find a match. As of writing, FULL JOINs are not supported in all RDBMS (MySQL).
-- show all records from both tables
SELECT *
FROM df1
FULL OUTER JOIN df2
ON df1.key = df2.key;
# show all records from both frames
In [30]: pd.merge(df1, df2, on='key', how='outer')
Out[30]:
key value_x value_y
0 A 0.469112 NaN
1 B -0.282863 1.212112
2 C -1.509059 NaN
3 D -1.135632 -0.173215
4 D -1.135632 0.119209
5 E NaN -1.044236
UNION¶
UNION ALL can be performed using concat()
.
In [31]: df1 = pd.DataFrame({'city': ['Chicago', 'San Francisco', 'New York City'],
....: 'rank': range(1, 4)})
....:
In [32]: df2 = pd.DataFrame({'city': ['Chicago', 'Boston', 'Los Angeles'],
....: 'rank': [1, 4, 5]})
....:
SELECT city, rank
FROM df1
UNION ALL
SELECT city, rank
FROM df2;
/*
city rank
Chicago 1
San Francisco 2
New York City 3
Chicago 1
Boston 4
Los Angeles 5
*/
In [33]: pd.concat([df1, df2])
Out[33]:
city rank
0 Chicago 1
1 San Francisco 2
2 New York City 3
0 Chicago 1
1 Boston 4
2 Los Angeles 5
SQL’s UNION is similar to UNION ALL, however UNION will remove duplicate rows.
SELECT city, rank
FROM df1
UNION
SELECT city, rank
FROM df2;
-- notice that there is only one Chicago record this time
/*
city rank
Chicago 1
San Francisco 2
New York City 3
Boston 4
Los Angeles 5
*/
In pandas, you can use concat()
in conjunction with
drop_duplicates()
.
In [34]: pd.concat([df1, df2]).drop_duplicates()
Out[34]:
city rank
0 Chicago 1
1 San Francisco 2
2 New York City 3
1 Boston 4
2 Los Angeles 5
pandas equivalents for some SQL analytic and aggregate functions¶
Top n rows with offset¶
-- MySQL
SELECT * FROM tips
ORDER BY tip DESC
LIMIT 10 OFFSET 5;
In [35]: tips.nlargest(10 + 5, columns='tip').tail(10)
Top n rows per group¶
-- Oracle's ROW_NUMBER() analytic function
SELECT * FROM (
SELECT
t.*,
ROW_NUMBER() OVER(PARTITION BY day ORDER BY total_bill DESC) AS rn
FROM tips t
)
WHERE rn < 3
ORDER BY day, rn;
In [36]: (tips.assign(rn=tips.sort_values(['total_bill'], ascending=False)
   ....:                     .groupby(['day'])
   ....:                     .cumcount() + 1)
   ....:      .query('rn < 3')
   ....:      .sort_values(['day', 'rn']))
   ....:
The same result can be obtained using the rank(method='first') function:
In [37]: (tips.assign(rnk=tips.groupby(['day'])['total_bill']
   ....:                      .rank(method='first', ascending=False))
   ....:      .query('rnk < 3')
   ....:      .sort_values(['day', 'rnk']))
   ....:
-- Oracle's RANK() analytic function
SELECT * FROM (
SELECT
t.*,
RANK() OVER(PARTITION BY sex ORDER BY tip) AS rnk
FROM tips t
WHERE tip < 2
)
WHERE rnk < 3
ORDER BY sex, rnk;
Let’s find the tips with rank < 3 within each gender group, considering only tips of less than $2.
Notice that when using rank(method='min')
function
rnk_min remains the same for the same tip
(as Oracle’s RANK() function)
In [38]: (tips[tips['tip'] < 2]
   ....:      .assign(rnk_min=tips.groupby(['sex'])['tip']
   ....:                          .rank(method='min'))
   ....:      .query('rnk_min < 3')
   ....:      .sort_values(['sex', 'rnk_min']))
   ....:
UPDATE¶
UPDATE tips
SET tip = tip*2
WHERE tip < 2;
In [39]: tips.loc[tips['tip'] < 2, 'tip'] *= 2
DELETE¶
DELETE FROM tips
WHERE tip > 9;
In pandas we select the rows that should remain, instead of deleting them
In [40]: tips = tips.loc[tips['tip'] <= 9]