Skip to content

Commit befd0e3

Browse files
[Issue 76] Add json_normalize (#82)
1 parent 8344c61 commit befd0e3

File tree

5 files changed

+22
-8
lines changed

5 files changed

+22
-8
lines changed

tests/snippets/test_pandas.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
# flake8: noqa: F841
2-
from typing import Union
2+
from typing import Any, Dict, List, Union
33

44
import pandas as pd
55

@@ -45,4 +45,16 @@ def test_types_concat() -> None:
4545

4646
rdf1: pd.DataFrame = pd.concat({'a': df, 'b': df2})
4747
rdf2: pd.DataFrame = pd.concat({1: df, 2: df2})
48-
rdf3: pd.DataFrame = pd.concat({1: df, None: df2})
48+
rdf3: pd.DataFrame = pd.concat({1: df, None: df2})
49+
50+
51+
def test_types_json_normalize() -> None:
    """Exercise the typed signature of pd.json_normalize with its keyword options."""
    # A list of records with nested dicts of varying shapes.
    records: List[Dict[str, Any]] = [
        {'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}},
        {'name': {'given': 'Mose', 'family': 'Regner'}},
        {'id': 2, 'name': 'Faye Raker'},
    ]
    # Plain normalization of the record list.
    frame_plain: pd.DataFrame = pd.json_normalize(data=records)
    # Limit flattening depth and use a custom separator.
    frame_shallow: pd.DataFrame = pd.json_normalize(data=records, max_level=0, sep=";")
    # Prefix options plus the Literal-typed errors parameter.
    frame_prefixed: pd.DataFrame = pd.json_normalize(
        data=records, meta_prefix="id", record_prefix="name", errors='raise')
    # Explicit record_path=None with a meta field.
    frame_meta: pd.DataFrame = pd.json_normalize(data=records, record_path=None, meta='id')
    # A single dict (not a list) is also an accepted input type.
    single: Dict[str, Any] = {'name': {'given': 'Mose', 'family': 'Regner'}}
    frame_single: pd.DataFrame = pd.json_normalize(data=single)

third_party/3/pandas/__init__.pyi

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ from pandas.core.api import BooleanDtype as BooleanDtype, Categorical as Categor
33
from pandas.core.arrays.sparse import SparseDtype as SparseDtype
44
from pandas.core.computation.api import eval as eval
55
from pandas.core.reshape.api import concat as concat, crosstab as crosstab, cut as cut, get_dummies as get_dummies, lreshape as lreshape, melt as melt, merge as merge, merge_asof as merge_asof, merge_ordered as merge_ordered, pivot as pivot, pivot_table as pivot_table, qcut as qcut, wide_to_long as wide_to_long
6-
from pandas.io.api import ExcelFile as ExcelFile, ExcelWriter as ExcelWriter, HDFStore as HDFStore, read_clipboard as read_clipboard, read_csv as read_csv, read_excel as read_excel, read_feather as read_feather, read_fwf as read_fwf, read_gbq as read_gbq, read_hdf as read_hdf, read_html as read_html, read_json as read_json, read_orc as read_orc, read_parquet as read_parquet, read_pickle as read_pickle, read_sas as read_sas, read_spss as read_spss, read_sql as read_sql, read_sql_query as read_sql_query, read_sql_table as read_sql_table, read_stata as read_stata, read_table as read_table, to_pickle as to_pickle
6+
from pandas.io.api import ExcelFile as ExcelFile, ExcelWriter as ExcelWriter, HDFStore as HDFStore, read_clipboard as read_clipboard, read_csv as read_csv, read_excel as read_excel, read_feather as read_feather, read_fwf as read_fwf, read_gbq as read_gbq, read_hdf as read_hdf, read_html as read_html, read_json as read_json, json_normalize as json_normalize, read_orc as read_orc, read_parquet as read_parquet, read_pickle as read_pickle, read_sas as read_sas, read_spss as read_spss, read_sql as read_sql, read_sql_query as read_sql_query, read_sql_table as read_sql_table, read_stata as read_stata, read_table as read_table, to_pickle as to_pickle
77
from pandas.tseries import offsets as offsets
88
from pandas.tseries.api import infer_freq as infer_freq
99
from pandas.util._print_versions import show_versions as show_versions

third_party/3/pandas/io/api.pyi

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ from pandas.io.excel import ExcelFile as ExcelFile, ExcelWriter as ExcelWriter,
33
from pandas.io.feather_format import read_feather as read_feather
44
from pandas.io.gbq import read_gbq as read_gbq
55
from pandas.io.html import read_html as read_html
6-
from pandas.io.json import read_json as read_json
6+
from pandas.io.json import read_json as read_json, json_normalize as json_normalize
77
from pandas.io.orc import read_orc as read_orc
88
from pandas.io.parquet import read_parquet as read_parquet
99
from pandas.io.parsers import read_csv as read_csv, read_fwf as read_fwf, read_table as read_table
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
from pandas.io.json._json import dumps as dumps, loads as loads, read_json as read_json, to_json as to_json
2-
from pandas.io.json._normalize import _json_normalize as _json_normalize, json_normalize as json_normalize
2+
from pandas.io.json._normalize import json_normalize as json_normalize
33
from pandas.io.json._table_schema import build_table_schema as build_table_schema
Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
1-
from typing import Any, Optional
1+
from typing import Any, Dict, List, Literal, Optional, Union
2+
3+
from pandas import DataFrame
4+
25

36
def convert_to_line_delimits(s: Any) -> Any: ...
47
def nested_to_record(ds: Any, prefix: str=..., sep: str=..., level: int=..., max_level: Optional[int]=...) -> Any: ...
58

6-
json_normalize: Any
7-
_json_normalize: Any
9+
def json_normalize(data: Union[Dict[Any, Any], List[Dict[Any, Any]]], record_path: Optional[Union[str, List[str]]] = ..., meta: Optional[Union[str, List[Union[str, List[str]]]]] = ..., meta_prefix: Optional[str] = ..., record_prefix: Optional[str] = ..., errors: Literal['raise', 'ignore'] = ..., sep: str = ..., max_level: Optional[int] = ...) -> DataFrame: ...

0 commit comments

Comments
 (0)