diff --git a/README.md b/README.md
index a43e495..35b6f35 100644
--- a/README.md
+++ b/README.md
@@ -3201,7 +3201,7 @@ Name: a, dtype: int64
 
 #### Series — Aggregate, Transform, Map:
 ```python
-<el> = <S>.sum/max/mean/idxmax/all/count()     # Or: <S>.agg(lambda <S>: <el>)
+<el> = <S>.sum/max/mean/std/idxmax/count()     # Or: <S>.agg(lambda <S>: <el>)
 <S>  = <S>.rank/diff/cumsum/ffill/interpol…()  # Or: <S>.agg/transform(lambda <S>: <S>)
 <S>  = <S>.isna/fillna/isin([<el/coll>])       # Or: <S>.agg/transform/map(lambda <el>: <el>)
 ```
@@ -3314,7 +3314,7 @@ c 6 7
 
 #### DataFrame — Aggregate, Transform, Map:
 ```python
-<S>  = <DF>.sum/max/mean/idxmax/all/count()    # Or: <DF>.apply/agg(lambda <S>: <el>)
+<S>  = <DF>.sum/max/mean/std/idxmax/count()    # Or: <DF>.apply/agg(lambda <S>: <el>)
 <DF> = <DF>.rank/diff/cumsum/ffill/interpo…()  # Or: <DF>.apply/agg/transform(lambda <S>: <S>)
 <DF> = <DF>.isna/fillna/isin([<el/coll>])      # Or: <DF>.applymap(lambda <el>: <el>)
 ```
@@ -3355,13 +3355,13 @@ c 6 7
 <S/DF> = pd.read_json/pickle(<path/url/file>)  # Also io.StringIO(<str>), io.BytesIO(<bytes>).
 <DF>   = pd.read_csv/excel(<path/url/file>)    # Also `header/index_col/dtype/usecols/…=<obj>`.
 <list> = pd.read_html(<path/url/file>)         # Raises ImportError if webpage has zero tables.
-<S/DF> = pd.read_parquet/feather/hdf(<path…>)  # Read_hdf() accepts `key=<s/df_name>` argument.
+<S/DF> = pd.read_parquet/feather/hdf(<path…>)  # Function read_hdf() accepts `key=<s/df_name>`.
 <DF>   = pd.read_sql('<table/query>', <conn>)  # Pass SQLite3/Alchemy connection (see #SQLite).
 ```
 
 ```python
 <DF>.to_json/csv/html/parquet/latex(<path>)    # Returns a string/bytes if path is omitted.
-<DF>.to_pickle/excel/feather/hdf(<path>)       # To_hdf() requires `key=<s/df_name>` argument.
+<DF>.to_pickle/excel/feather/hdf(<path>)       # Method to_hdf() requires `key=<s/df_name>`.
 <DF>.to_sql('<table_name>', <connection>)      # Also `if_exists='fail/replace/append'`.
 ```
 * **`'$ pip3 install "pandas[excel]" odfpy lxml pyarrow'` installs dependencies.**
@@ -3380,7 +3380,7 @@ c 6 7
 ```
 
 ```python
-<DF> = <GB>.sum/max/mean/idxmax/all()          # Or: <GB>.agg(lambda <S>: <el>)
+<DF> = <GB>.sum/max/mean/std/idxmax/count()    # Or: <GB>.agg(lambda <S>: <el>)
 <DF> = <GB>.rank/diff/cumsum/ffill()           # Or: <GB>.transform(lambda <S>: <S>)
 <DF> = <GB>.fillna(<el>)                       # Or: <GB>.transform(lambda <S>: <S>)
 ```
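The following sketch (illustrative only, not part of the patch) shows the Series calls listed in the updated README lines above, assuming a default pandas install:

```python
import pandas as pd

s = pd.Series([3, 1, 4], index=['a', 'b', 'c'])
print(s.std())                        # 1.5275..., the sample standard deviation (ddof=1).
print(s.idxmax())                     # 'c', index label of the largest value.
print(s.count())                      # 3, number of non-missing values.
print(s.agg(['sum', 'mean', 'std']))  # Several aggregations at once, returned as a Series.
print(s.rank())                       # a: 2.0, b: 1.0, c: 3.0. Transforms, keeps the shape.
```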
diff --git a/index.html b/index.html
index ea6618c..2f4deb3 100644
--- a/index.html
+++ b/index.html
@@ -2618,7 +2618,7 @@ Name: a, dtype: int64
 • Pandas uses NumPy types like 'np.int64'. Series is converted to 'float64' if we assign np.nan to any item. Use '<S>.astype(<str/type>)' to get converted Series.
 • Series will silently overflow if we run 'pd.Series([100], dtype="int8") + 100'!
 
 Series — Aggregate, Transform, Map:
-<el> = <S>.sum/max/mean/idxmax/all/count()     # Or: <S>.agg(lambda <S>: <el>)
+<el> = <S>.sum/max/mean/std/idxmax/count()     # Or: <S>.agg(lambda <S>: <el>)
 <S>  = <S>.rank/diff/cumsum/ffill/interpol…()  # Or: <S>.agg/transform(lambda <S>: <S>)
 <S>  = <S>.isna/fillna/isin([<el/coll>])       # Or: <S>.agg/transform/map(lambda <el>: <el>)
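The two caveat bullets in the hunk above, written out as a runnable check (illustrative only; the exact warning behaviour of the in-place upcast varies between pandas versions):

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
print(s.dtype)                        # int64
s.iloc[0] = np.nan                    # NaN can't be stored in an int column; newer pandas may warn here.
print(s.dtype)                        # float64, the whole Series was upcast.

t = pd.Series([100], dtype='int8')
print(t + 100)                        # -56, the int8 result wraps around instead of raising.
```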
     
@@ -2707,7 +2707,7 @@ c 6 7
 ┃ │ c . . 6 7 │ │ │ treated as a column. ┃
 ┗━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━┷━━━━━━━━━━━━┷━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
 
 DataFrame — Aggregate, Transform, Map:
-<S>  = <DF>.sum/max/mean/idxmax/all/count()    # Or: <DF>.apply/agg(lambda <S>: <el>)
+<S>  = <DF>.sum/max/mean/std/idxmax/count()    # Or: <DF>.apply/agg(lambda <S>: <el>)
 <DF> = <DF>.rank/diff/cumsum/ffill/interpo…()  # Or: <DF>.apply/agg/transform(lambda <S>: <S>)
 <DF> = <DF>.isna/fillna/isin([<el/coll>])      # Or: <DF>.applymap(lambda <el>: <el>)
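A minimal sketch (not part of the patch) of how the three DataFrame rows above differ in what they return:

```python
import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]}, index=['a', 'b'])
print(df.sum())                                # One value per column, returns a Series.
print(df.cumsum())                             # Same shape as the input, returns a DataFrame.
print(df.isin([2, 3]))                         # Element-wise check, returns a DataFrame of bools.
print(df.apply(lambda s: s.max() - s.min()))   # Custom per-column aggregation, returns a Series.
```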
     
@@ -2743,12 +2743,12 @@ c 6 7
 
 File Formats
 
 <S/DF> = pd.read_json/pickle(<path/url/file>)  # Also io.StringIO(<str>), io.BytesIO(<bytes>).
 <DF>   = pd.read_csv/excel(<path/url/file>)    # Also `header/index_col/dtype/usecols/…=<obj>`.
 <list> = pd.read_html(<path/url/file>)         # Raises ImportError if webpage has zero tables.
-<S/DF> = pd.read_parquet/feather/hdf(<path…>)  # Read_hdf() accepts `key=<s/df_name>` argument.
+<S/DF> = pd.read_parquet/feather/hdf(<path…>)  # Function read_hdf() accepts `key=<s/df_name>`.
 <DF>   = pd.read_sql('<table/query>', <conn>)  # Pass SQLite3/Alchemy connection (see #SQLite).
 
 <DF>.to_json/csv/html/parquet/latex(<path>)    # Returns a string/bytes if path is omitted.
-<DF>.to_pickle/excel/feather/hdf(<path>)       # To_hdf() requires `key=<s/df_name>` argument.
+<DF>.to_pickle/excel/feather/hdf(<path>)       # Method to_hdf() requires `key=<s/df_name>`.
 <DF>.to_sql('<table_name>', <connection>)      # Also `if_exists='fail/replace/append'`.
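A minimal sketch of the `key=` argument that the reworded comments refer to. Writing HDF5 additionally requires the optional PyTables dependency (`pip3 install tables`), and `store.h5`/`my_df` are just example names:

```python
import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]}, index=['a', 'b'])
df.to_hdf('store.h5', key='my_df')               # key= names the object inside the HDF5 store.
restored = pd.read_hdf('store.h5', key='my_df')  # key= can be omitted if the store holds one object.
print(df.equals(restored))                       # True
```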
     
@@ -2764,7 +2764,7 @@ c 6 7
-<DF> = <GB>.sum/max/mean/idxmax/all()          # Or: <GB>.agg(lambda <S>: <el>)
+<DF> = <GB>.sum/max/mean/std/idxmax/count()    # Or: <GB>.agg(lambda <S>: <el>)
 <DF> = <GB>.rank/diff/cumsum/ffill()           # Or: <GB>.transform(lambda <S>: <S>)
 <DF> = <GB>.fillna(<el>)                       # Or: <GB>.transform(lambda <S>: <S>)
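A minimal sketch (not part of the patch) contrasting GroupBy aggregation with GroupBy transformation, as listed in the last hunk:

```python
import pandas as pd

df = pd.DataFrame({'grp': ['a', 'a', 'b'], 'val': [1, 2, 5]})
gb = df.groupby('grp')
print(gb.sum())                                      # One row per group, i.e. an aggregation.
print(gb['val'].std())                               # a: 0.707..., b: NaN (single-element group).
print(gb.cumsum())                                   # One row per original row, i.e. a transformation.
print(gb['val'].transform(lambda s: s - s.mean()))   # Centers each value within its group.
```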