diff --git a/.gitignore b/.gitignore
index 639a9e9..4662532 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@ __pycache__
 /.vscode
 docs/graphs/png/*.png
 .doit.db.*
-pynance/*.pyc
+*.pyc
 .pytest_cache
 /.hypothesis
 .coverage
diff --git a/pynance/dash_viz/plot_flow_test.py b/pynance/dash_viz/plot_flow_test.py
index deb4906..d130556 100644
--- a/pynance/dash_viz/plot_flow_test.py
+++ b/pynance/dash_viz/plot_flow_test.py
@@ -32,7 +32,7 @@ def test_onselect_csvtype(self):
 
         for expected, selected in zip(onselect_response, dropdown_values):
             response = onselect_csvtype(selected)
-            response_dict = json.loads(response.data.decode())
+            response_dict = json.loads(response)  # .data.decode())
 
             is_enabled = not response_dict["response"]["props"]["disabled"]
             self.assertEqual(expected, is_enabled)
@@ -140,7 +140,7 @@ def test_update_output(self):
         bytestr = self._read_sample_file_like_uploaded()
 
         response = update_output(bytestr, "DKBCash")
-        response_dict = json.loads(response.data.decode())
+        response_dict = json.loads(response)  # .data.decode())
 
         res_charts = response_dict["response"]["props"]["figure"]["data"]
diff --git a/pynance/database.py b/pynance/database.py
new file mode 100644
index 0000000..615d1c8
--- /dev/null
+++ b/pynance/database.py
@@ -0,0 +1,204 @@
+"""
+Database layer: LowLevelConnection (connection and schema), InsertTable (temporary staging table), Storage (high-level append/load API).
+"""
+
+import sqlite3
+import numpy as np
+from .definitions import COLUMNS
+
+
+def exists_table(conn, table_name):
+    """
+    Returns True if and only if 'table_name' is an existing table.
+    """
+
+    result = conn.execute(
+        'select count(*) from sqlite_master where type="table" and name="{}"'.format(table_name)
+    ).fetchall()
+    return result[0][0] == 1
+
+
+def generate_sqlite_columns_definitions():
+    """
+    Converts definitions.COLUMNS into the column definitions of a sqlite table. By column definitions,
+    we mean the part of a CREATE TABLE statement that defines the columns:
+
+    CREATE TABLE my_table_name (<column definitions>)
+
+    Returns the column definitions as a string.
+    """
+
+    type_lookup_dict = {
+        str: 'TEXT',
+        np.datetime64: 'TEXT',
+        np.float64: 'REAL'
+    }
+
+    def name_type_to_string(x):
+        col_name, col_type = x
+        if col_type not in type_lookup_dict:
+            raise ValueError(
+                "Don't know which sqlite type '{}' is".format(col_type))
+        return '{} {}'.format(col_name, type_lookup_dict[col_type])
+
+    return ', '.join(map(name_type_to_string, COLUMNS.items()))
+
+
+class LowLevelConnection(object):
+    """
+    Class that handles the low-level database connection. Makes sure the expected table structure exists.
+    Should be used in with-statements.
+    """
+
+    # Schema evolution should be handled later once it is needed
+    SUPPORTED_SCHEMA_VERSIONS = [1]
+
+    TABLE_SCHEMA_VERSION = 'schema'
+    TABLE_TRANSACTIONS = 'transactions'
+    ID_COLUMN = 'id'
+
+    def _get_db_conn(self):
+        """
+        Get the connection to the sqlite database. We use the 'DEFERRED' isolation level. This
+        is the default in Python 3 anyway; in Python 2 the default is autocommit mode. The DEFERRED
+        isolation level seems appropriate in this case. See also
+        * https://www.sqlite.org/lang_transaction.html
+        """
+        return sqlite3.connect(
+            self.db_file_name,
+            isolation_level='DEFERRED'
+        )
+
+    def __init__(self, schema_version, db_file_name):
+        """
+        Parameters:
+        * `schema_version`: Integer denoting the schema version.
+        * `db_file_name`: This DB file will be created if it does not yet exist.
+        """
+        assert schema_version in LowLevelConnection.SUPPORTED_SCHEMA_VERSIONS
+        self.db_file_name = db_file_name
+
+        connection = self._get_db_conn()
+        with connection:
+            if not exists_table(connection, LowLevelConnection.TABLE_SCHEMA_VERSION):
+                connection.execute('CREATE TABLE IF NOT EXISTS {} (version INTEGER)'.format(
+                    LowLevelConnection.TABLE_SCHEMA_VERSION))
+                connection.execute('INSERT INTO {} VALUES (1)'.format(
+                    LowLevelConnection.TABLE_SCHEMA_VERSION))
+
+            if not exists_table(connection, LowLevelConnection.TABLE_TRANSACTIONS):
+                connection.execute('CREATE TABLE IF NOT EXISTS {} ({})'.format(
+                    LowLevelConnection.TABLE_TRANSACTIONS,
+                    generate_sqlite_columns_definitions()
+                ))
+                connection.execute('CREATE INDEX date_index ON {} ({})'.format(
+                    LowLevelConnection.TABLE_TRANSACTIONS, 'date'))
+                connection.execute('CREATE INDEX id ON {} ({})'.format(
+                    LowLevelConnection.TABLE_TRANSACTIONS, LowLevelConnection.ID_COLUMN))
+
+    def __enter__(self):
+        self.conn = self._get_db_conn()
+        return self.conn
+
+    def __exit__(self, _1, _2, _3):
+        self.conn.close()
+
+
+class InsertTable(object):
+    """
+    This class makes sure that a DataFrame is inserted into a temporary table of a sqlite database.
+    It also makes sure that the temporary table is created in a safe way and disposed of afterwards. For
+    this purpose, instances of this class should be used in with-statements.
+    """
+
+    @staticmethod
+    def create_temp_table(conn):
+        """Creates a temporary table suitable for inserting the DataFrame and returns its schema and name."""
+
+        cursor = conn.cursor()
+        i, table_name, go_on = 0, '', True
+
+        while go_on:
+            go_on = False
+            table_name = 'insert_df_{}'.format(i)
+            try:
+                cursor.execute('CREATE TEMPORARY TABLE {} ({})'.format(
+                    table_name,
+                    generate_sqlite_columns_definitions()
+                ))
+            except sqlite3.OperationalError:
+                go_on = True
+                i += 1
+
+        return 'temp', table_name
+
+    def __init__(self, conn, data_frame):
+        "Uses 'conn' to write everything from 'data_frame' into a temporary table."
+
+        self.conn = conn
+        self.temp_table_schema, self.temp_table_name = InsertTable.create_temp_table(
+            conn)
+        data_frame.to_sql(
+            name=self.temp_table_name,
+            schema=self.temp_table_schema,
+            index=False,
+            con=conn,
+            chunksize=5000
+        )
+
+    def __enter__(self):
+        return (self.temp_table_schema, self.temp_table_name)
+
+    def __exit__(self, _1, _2, _3):
+        "Make sure the table is gone."
+        self.conn.cursor().execute('DROP TABLE {}.{}'.format(
+            self.temp_table_schema, self.temp_table_name
+        ))
+
+
+class Storage(object):
+
+    def __init__(self, db_file):
+        self.db_file = db_file
+
+    @classmethod
+    def validate_dataframe_shape(cls, data_frame):
+        """
+        Asserts that the correct columns are present.
+        Tolerates additional columns.
+        """
+        return True
+
+    def append_dataframe(self, data_frame):
+        """
+        Asserts that the shape of the dataframe is correct and
+        returns the part of the dataframe that is new.
This part also includes the generated 'id' column.
+        """
+        if not self.validate_dataframe_shape(data_frame):
+            raise Exception('Invalid dataframe')
+
+        with LowLevelConnection(1, self.db_file) as conn:
+            with InsertTable(conn, data_frame) as insert_table:
+                # add existing data to insert_table
+                with conn:
+                    column_keys = COLUMNS.keys()
+                    columns_str = ','.join(column_keys)
+                    # NOTE: ON CONFLICT needs a UNIQUE constraint on the id column, which the schema does not create yet
+                    conn.cursor().execute(
+                        '''
+                        INSERT INTO {}.{}
+                        SELECT {}
+                        FROM {}
+                        ON CONFLICT ({}) DO NOTHING
+                        '''.format(insert_table[0], insert_table[1],
+                                   columns_str,
+                                   LowLevelConnection.TABLE_TRANSACTIONS,
+                                   LowLevelConnection.ID_COLUMN))
+                # but only non-duplicates
+                # replace existing table by insert_table
+
+    def load_dataframe(self):
+        """
+        Loads the stored transactions from the database. The result contains the ID column.
+        """
+        pass
diff --git a/pynance/database_test.py b/pynance/database_test.py
new file mode 100644
index 0000000..b3bce49
--- /dev/null
+++ b/pynance/database_test.py
@@ -0,0 +1,163 @@
+import unittest
+import os.path
+import shutil
+from tempfile import TemporaryDirectory, TemporaryFile
+import sqlite3
+
+from pynance.database import generate_sqlite_columns_definitions, \
+    LowLevelConnection, InsertTable
+from pynance.textimporter import read_csv
+from pynance.dkb import SupportedCsvTypes
+
+
+class ColumnsDefinitionsTestCase(unittest.TestCase):
+    def test_it_produces_valid_string(self):
+        result = generate_sqlite_columns_definitions()
+        self.assertEqual(type(result), str)
+        self.assertTrue(len(result) > 0)
+
+    def test_it_produces_valid_sql_types(self):
+        with TemporaryDirectory() as tmp_dir:
+            tmp_file = os.path.join(tmp_dir, 'test.db')
+            conn = sqlite3.connect(tmp_file)
+            column_definitions = generate_sqlite_columns_definitions()
+            query = 'CREATE TABLE test ({})'.format(column_definitions)
+            conn.execute(query)
+            conn.close()
+
+
+class LowLevelConnectionTestCase(unittest.TestCase):
+    def test_creates_database_file_if_not_exists(self):
+        with TemporaryDirectory() as tmp_dir:
+            db_file = os.path.join(tmp_dir, 'test.db')
+            self.assertFalse(os.path.exists(db_file))
+            with LowLevelConnection(1, db_file) as _:
+                pass
+            self.assertTrue(os.path.exists(db_file))
+
+    def test_opens_connection(self):
+        with TemporaryDirectory() as tmp_dir:
+            with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                self.assertIsNotNone(conn)
+
+    def test_creates_expected_tables(self):
+        with TemporaryDirectory() as tmp_dir:
+            with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                cursor = conn.cursor()
+                tables = set(map(
+                    lambda x: x[0],
+                    cursor.execute(
+                        'select name from sqlite_master where type="table"').fetchall()
+                ))
+                self.assertEqual(
+                    tables,
+                    set([LowLevelConnection.TABLE_SCHEMA_VERSION, LowLevelConnection.TABLE_TRANSACTIONS
+                         ]))
+                self.assertEqual(
+                    [(1,)],
+                    cursor.execute(
+                        'select count(*) from {}'.format(LowLevelConnection.TABLE_SCHEMA_VERSION)).fetchall()
+                )
+
+    def test_works_on_same_database_twice(self):
+        with TemporaryDirectory() as tmp_dir:
+            db_name = os.path.join(tmp_dir, 'test.db')
+            with LowLevelConnection(1, db_name) as _:
+                pass
+            with LowLevelConnection(1, db_name) as conn:
+                result = conn \
+                    .execute('select count(*) from {}'.format(LowLevelConnection.TABLE_SCHEMA_VERSION)) \
+                    .fetchall()
+                self.assertEqual(1, result[0][0])
+
+
+class InsertTableTestCase(unittest.TestCase):
+
+    def test_create_temp_table_table_exists(self):
+        with TemporaryDirectory() as tmp_dir:
+            with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                table_schema, table_name = InsertTable.create_temp_table(conn)
+                # Fails if and only if 
table does not exist
+                conn.cursor().execute('select count(*) from {}.{}'.format(table_schema, table_name))
+
+    def test_create_temp_table_chooses_other_table_if_exists(self):
+        with TemporaryDirectory() as tmp_dir:
+            with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                conn.cursor().execute('CREATE TEMPORARY TABLE insert_df_0 (id INT)')
+                table_schema, table_name = InsertTable.create_temp_table(conn)
+                self.assertEqual(table_schema, 'temp')
+                self.assertEqual(
+                    table_name, 'insert_df_1', 'expected table creation to fail exactly the first time')
+
+    def test_it_removes_the_temporary_table(self):
+        test_data_frame = read_csv(os.path.join(
+            'pynance', 'test_data', 'dkb_cash_sample.csv'), SupportedCsvTypes.DKBCash)
+        # TODO: get rid of the 'drop' here
+        test_data_frame = test_data_frame.drop(['origin'], axis=1)
+        with TemporaryDirectory() as tmp_dir:
+            with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                insert_table_with_schema = ''
+
+                def check_if_table_exists():
+                    conn.cursor().execute('select count(*) from {}'.format(insert_table_with_schema))
+
+                with InsertTable(conn, test_data_frame) as insert_table:
+                    insert_table_with_schema = '{}.{}'.format(
+                        insert_table[0], insert_table[1])
+                    check_if_table_exists()
+
+                self.assertRaises(sqlite3.OperationalError,
+                                  check_if_table_exists)
+
+    def test_it_works_with_dataframes_from_text_importer(self):
+        def run_test(csv_file, df_format):
+            # Get the DataFrame
+            self.assertTrue(os.path.isfile(csv_file))
+            # TODO: Investigate what origin is good for and if we want to include it as column
+            # in the database as well.
+            data_frame = read_csv(csv_file, df_format).drop(['origin'], axis=1)
+            self.assertTrue(len(data_frame.index) > 0)
+
+            # Load it into the InsertTable and test this
+            with TemporaryDirectory() as tmp_dir:
+                with LowLevelConnection(1, os.path.join(tmp_dir, 'test.db')) as conn:
+                    with InsertTable(conn, data_frame) as insert_table:
+
+                        data_frame_size = len(data_frame.index)
+                        database_rows = conn.cursor() \
+                            .execute('SELECT count(*) FROM {}.{}'.format(insert_table[0], insert_table[1])).fetchall()[0][0]
+
+                        self.assertEqual(
+                            data_frame_size, database_rows, 'not all (or more?) rows written to database')
+
+        run_test(os.path.join('pynance', 'test_data',
+                              'dkb_cash_sample.csv'), SupportedCsvTypes.DKBCash)
+        run_test(os.path.join('pynance', 'test_data',
+                              'dkb_visa_sample.csv'), SupportedCsvTypes.DKBVisa)
+
+
+class StorageTestCase(unittest.TestCase):
+
+    def test_validate_dataframe_shape_complains_when_columns_are_missing(self):
+        "Complains when columns are missing"
+        pass
+
+    def test_validate_dataframe_shape_accepts_additional_columns(self):
+        "Does not complain when additional columns are present"
+        pass
+
+    def test_append_dataframe_rejects_invalid_dataframes(self):
+        pass
+
+    def test_append_dataframe_returns_new_parts_with_id(self):
+        pass
+
+    def test_append_dataframe_returned_ids_are_the_same_as_in_load_dataframe(self):
+        pass
+
+    def test_append_dataframe_duplicates_are_left_out(self):
+        pass
+
+    def test_load_dataframe_works_with_new_storage_instance(self):
+        "implies a new connection etc."
+ pass diff --git a/pynance/dataframe_util.py b/pynance/dataframe_util.py new file mode 100644 index 0000000..b10b701 --- /dev/null +++ b/pynance/dataframe_util.py @@ -0,0 +1,13 @@ +from .definitions import COLUMNS +from hashlib import md5 + + +def hash_row(row): + h = md5() + for value in row: + h.update(bytes(str(value), encoding='utf8')) + return h.hexdigest() + + +def create_id_hash(new_df): + return new_df.apply(hash_row, axis=1) diff --git a/pynance/dataframe_util_test.py b/pynance/dataframe_util_test.py new file mode 100644 index 0000000..a24a32e --- /dev/null +++ b/pynance/dataframe_util_test.py @@ -0,0 +1,25 @@ +import unittest +from hypothesis import given +from datetime import date + +from .transactions import dataframe +from .dataframe_util import hash_row, create_id_hash + + +class DataframeUtilTestcase(unittest.TestCase): + + @given(df=dataframe(min_size=1, max_date=date(2000, 1, 1))) + def test_hash_row(self, df): + for i, row in df.iterrows(): + hash_result = hash_row(row) + self.assertEqual(type(hash_result), str) + self.assertEqual(len(hash_result), 32) + + @given(df=dataframe(min_size=1, max_date=date(2000, 1, 1))) + def test_create_id_hash(self, df): + result_hash_column = create_id_hash(df) + self.assertEqual(len(result_hash_column), len(df)) + + for item in result_hash_column: + self.assertEqual(type(item), str) + self.assertEqual(len(item), 32) diff --git a/pynance/definitions.py b/pynance/definitions.py new file mode 100644 index 0000000..9b97d27 --- /dev/null +++ b/pynance/definitions.py @@ -0,0 +1,23 @@ +""" +This module contains common definitions that are shared across other pynance +modules. +""" + +import numpy as np + +IMMUTABLE_COLUMNS = { + "date": np.datetime64, + "sender_account": str, + "receiver_account": str, + "text": str, + "amount": np.float64, + "total_balance": np.float64, + "currency": str, + "origin": str +} + +# see issue #5 and #6 +# use numpy types for numbers, because that's what pandas likes +COLUMNS = dict(id=str, + category=str, + tags=str, **IMMUTABLE_COLUMNS) diff --git a/pynance/storage_test.py b/pynance/storage_test.py new file mode 100644 index 0000000..562662c --- /dev/null +++ b/pynance/storage_test.py @@ -0,0 +1,108 @@ +import unittest +import os + +import numpy as np +import pandas as pd +from pandas.testing import assert_frame_equal +from tempfile import TemporaryDirectory + + +from .database import Storage +from .textimporter import read_csv +from .dkb import SupportedCsvTypes +from .definitions import COLUMNS + + +class StorageTestCase(unittest.TestCase): + def _read_dummy_file_dkbcash_small(self): + dummyfile_dkbcash_small = os.path.join("pynance", + "test_data", + "dkb_cash_sample.csv") + assert os.path.isfile(dummyfile_dkbcash_small) + + return read_csv(dummyfile_dkbcash_small, + SupportedCsvTypes.DKBCash) + + def _read_dummy_file_dkbvisa_small(self): + dummyfile_dkbvisa_small = os.path.join("pynance", + "test_data", + "dkb_visa_sample.csv") + assert os.path.isfile(dummyfile_dkbvisa_small) + + return read_csv(dummyfile_dkbvisa_small, + SupportedCsvTypes.DKBVisa) + + def _assert_frame_relevant_columns_equal(self, df1, df2): + assert_frame_equal(df1[COLUMNS], df2[COLUMNS]) + + def setUp(self): + self.tempdir = TemporaryDirectory() + self.db_file = os.path.join(self.tempdir.name, "test.sqlite") + + def tearDown(self): + self.tempdir.cleanup() + + def test_init_storage(self): + storage = Storage(self.db_file) + assert storage is not None + + def test_append_dataframe_dkb_cash_small(self): + # delete file to make sure starting 
from scratch
+
+        storage = Storage(self.db_file)
+        df = self._read_dummy_file_dkbcash_small()
+        newdf = storage.append_dataframe(df)
+
+        self._assert_frame_relevant_columns_equal(df, newdf)
+
+    def test_append_dataframe_dkb_cash_and_visa(self):
+        # delete file to make sure starting from scratch
+
+        storage = Storage(self.db_file)
+        df_cash = self._read_dummy_file_dkbcash_small()
+        df_visa = self._read_dummy_file_dkbvisa_small()
+
+        storage.append_dataframe(df_cash)
+        storage.append_dataframe(df_visa)
+
+        df_loaded = storage.load_dataframe()
+
+        df_expected = df_cash.append(df_visa).sort_values(by="date",
+                                                          ascending=False)
+
+        self._assert_frame_relevant_columns_equal(df_loaded, df_expected)
+
+    def test_load_dataframe(self):
+        # delete file to make sure starting from scratch
+
+        storage = Storage(self.db_file)
+        df = self._read_dummy_file_dkbcash_small()
+        newdf = storage.append_dataframe(df)
+        loaded_df = storage.load_dataframe()
+
+        self._assert_frame_relevant_columns_equal(df, loaded_df)
+
+    def test_append_dataframe_ignores_duplicates(self):
+        # delete file to make sure starting from scratch
+
+        storage = Storage(self.db_file)
+        df = self._read_dummy_file_dkbcash_small()
+
+        # appending twice
+        newdf = storage.append_dataframe(df)
+        newdf2 = storage.append_dataframe(df)
+
+        loaded_df = storage.load_dataframe()
+
+        self._assert_frame_relevant_columns_equal(df, loaded_df)
+
+    def test_append_invalid_dataframe_fails(self):
+        random_df = pd.DataFrame(np.random.randn(100, 2),
+                                 columns=['colA', 'colB'])
+
+        storage = Storage(self.db_file)
+
+        def append_invalid():
+            return storage.append_dataframe(random_df)
+
+        self.assertRaises(Exception, append_invalid)
diff --git a/pynance/textimporter.py b/pynance/textimporter.py
index 77f5e25..9cf06a1 100644
--- a/pynance/textimporter.py
+++ b/pynance/textimporter.py
@@ -4,6 +4,9 @@
 import pandas as pd
 import numpy as np
 
+from .definitions import COLUMNS
+from .dataframe_util import create_id_hash
+
 
 def read_csv(filepath_or_buffer, description):
     """
@@ -79,7 +82,7 @@ def read_csv(filepath_or_buffer, description):
         amounts = new_df['amount'].values
         new_df['total_balance'] = amounts_to_balances(amounts,
                                                       final_total_balance)
-
+        new_df['id'] = create_id_hash(new_df)
         return new_df
 
 
@@ -198,20 +201,3 @@ class UnsupportedCsvFormatException(IOError):
     a setting that does not fit the actual file
     """
     pass
-
-
-# STATIC DEFINITIONS below this line ################
-
-# see issue #5 and #6
-# use numpy types for numbers, because that's what pandas likes
-COLUMNS = {
-    "date": np.datetime64,
-    "sender_account": str,
-    "receiver_account": str,
-    "text": str,
-    "amount": np.float64,
-    "total_balance": np.float64,
-    "currency": str,
-    "category": str,
-    "tags": str,
-    "origin": str}
diff --git a/pynance/transactions.py b/pynance/transactions.py
new file mode 100644
index 0000000..cd3eb34
--- /dev/null
+++ b/pynance/transactions.py
@@ -0,0 +1,75 @@
+"""
+Contains transaction test strategies.
+"""
+
+import hypothesis.strategies as st
+import datetime
+import pandas as pd
+import numpy as np
+
+from .dataframe_util import create_id_hash
+
+KNOWN_CURRENCIES = ['EUR', 'USD']
+ALPHABET = list(
+    map(str, 'abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789äüöß'))
+
+
+@st.composite
+def single_transaction(draw, min_date=None, max_date=None):
+
+    # As a performance optimization, we don't generate each column individually and reuse
+    # already generated values. 
If we don't do this, test generation is too slow and + # Hypothesis' HealthChecks make the tests fail + d = draw(st.dates(min_value=min_date, max_value=max_date)) + text = draw(st.text(alphabet=ALPHABET)) + floats = draw(st.floats(min_value=0.01, max_value=10000000)) + currency = draw(st.sampled_from(KNOWN_CURRENCIES)) + + return (d, text, text, text, floats, floats, currency, text, text, text) + + +@st.composite +def dataframe(draw, min_size=0, max_size=None, min_date=None, max_date=None): + if not min_date: + min_date = datetime.date(1000, 1, 1) + if not max_date: + max_date = datetime.date(9999, 12, 31) + + elements = draw(st.lists( + single_transaction(min_date=min_date, max_date=max_date), + min_size=min_size, + max_size=max_size + )) + + dates, sender_accounts, receiver_accounts, texts, amounts, total_balances, currencies, \ + categories, tagss, origins = [], [], [], [], [], [], [], [], [], [] + + for date, sender_account, receiver_account, text, amount, total_balance, currency, category, tags, origin in elements: + dates.append(date) + sender_accounts.append(sender_account) + receiver_accounts.append(receiver_account) + texts.append(text) + amounts.append(amount) + total_balances.append(total_balance) + currencies.append(currency) + categories.append(category) + tagss.append(tags) + origins.append(origin) + + result_frame = pd.DataFrame({ + 'date': dates, + 'sender_account': sender_accounts, + 'receiver_account': receiver_accounts, + 'text': texts, + 'amount': amounts, + 'total_balance': total_balances, + 'currency': currencies, + 'category': categories, + 'tags': tagss, + 'origin': origins}) + + hash_column = create_id_hash(result_frame) + + result_frame['id'] = hash_column + + return result_frame diff --git a/pynance/transactions_test.py b/pynance/transactions_test.py new file mode 100644 index 0000000..1709b56 --- /dev/null +++ b/pynance/transactions_test.py @@ -0,0 +1,35 @@ +import unittest +from .transactions import dataframe +from hypothesis import given +import numpy as np +from datetime import date + +from pynance.definitions import COLUMNS + + +class DataframeTestCase(unittest.TestCase): + + @given(df=dataframe(min_size=1, max_size=1)) + def test_has_expected_columns(self, df): + types = dict(df.dtypes) + self.assertEqual(len(types), len(COLUMNS)) + for col in COLUMNS: + self.assertTrue(col in types) + + @given(df=dataframe(min_size=1, min_date=date(2000, 1, 1))) + def test_respects_min_date(self, df): + remaining = df['date'][df['date'] < date(2000, 1, 1)] + self.assertEqual(remaining.size, 0) + + @given(df=dataframe(min_size=1, max_date=date(2000, 1, 1))) + def test_respects_max_date(self, df): + remaining = df['date'][df['date'] > date(2000, 1, 1)] + self.assertEqual(remaining.size, 0) + + @given(df=dataframe(min_size=10)) + def test_respects_min_size(self, df): + self.assertGreaterEqual(len(df), 10) + + @given(dataframe(max_size=10)) + def test_respects_max_size(self, df): + self.assertLessEqual(len(df), 10) diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..eecf161 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,22 @@ +pandas +numpy +dash +dash-core-components +dash-html-components + +hypothesis +codecov + +# The latest version of doit supporting Python 2 is 0.29.0 +doit==0.29.0; python_version < '3.0' +doit; python_version >= '3.0' +attrs>17.4 + +pytest==4.0.0; python_version < '3.0' +pytest-cov==2.6.0; python_version < '3.0' +pytest==4.2.0; python_version >= '3.0' +pytest-cov==2.6.1; python_version >= '3.0' + +pylint 
+pep8 +autopep8 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index acb1e0b..f01f8f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,18 +2,4 @@ pandas numpy dash dash-core-components -dash-html-components - -hypothesis -codecov - -# The latest version of doit supporting Python 2 is 0.29.0 -doit==0.29.0; python_version < '3.0' -doit; python_version >= '3.0' - -attrs>17.4 - -pytest==4.0.0; python_version < '3.0' -pytest-cov==2.6.0; python_version < '3.0' -pytest==4.2.0; python_version >= '3.0' -pytest-cov==2.6.1; python_version >= '3.0' +dash-html-components \ No newline at end of file diff --git a/unittests.py b/unittests.py new file mode 100644 index 0000000..715a9ae --- /dev/null +++ b/unittests.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +from __future__ import print_function, absolute_import + +import unittest +import doctest + +import pynance.textimporter_test +import pynance.dash_viz.plot_flow_test +import pynance.database_test +import pynance.parser_test + +def doc_test_suite(): + """ + Returns the testsuite doctests for all modules. + Please don't forget to add new modules here. + """ + + import pkgutil + import importlib + import pynance + + doctest_suite = unittest.TestSuite() + + def add_doctests_for_module(package): + """ + Recursively walks `package` and adds doctests for all submodules + and subpackages to `doctest_suite` + """ + for _, name, is_pkg in pkgutil.walk_packages(package.__path__): + sub_module = importlib.import_module( + '{}.{}'.format(package.__name__, name)) + if is_pkg: + add_doctests_for_module(sub_module) + else: + doctest_suite.addTest(doctest.DocTestSuite(sub_module)) + + add_doctests_for_module(pynance) + return doctest_suite + + +def test_suite(): + suite = unittest.TestSuite() + suite.addTests(pynance.textimporter_test.test_suite()) + suite.addTests(pynance.dash_viz.plot_flow_test.test_suite()) + suite.addTests(pynance.database_test.test_suite()) + suite.addTests(pynance.parser_test.test_suite()) + + suite.addTest(doc_test_suite()) + + return suite + + +def run_all_unit_tests(): + test_runner = unittest.TextTestRunner() + return len(test_runner.run(test_suite()).failures) == 0 + + +if __name__ == "__main__": + import sys + all_tests_ok = run_all_unit_tests() + sys.exit(not all_tests_ok)
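
Reviewer note (illustrative, not part of the patch): the storage tests above sketch the intended
round-trip through the new Storage API. A minimal usage sketch, assuming the remaining WIP lands
as designed -- in this diff `Storage.load_dataframe` is still a stub and `append_dataframe` is
meant to return only the previously unseen rows together with their generated 'id':

    from pynance.database import Storage
    from pynance.textimporter import read_csv
    from pynance.dkb import SupportedCsvTypes

    # hypothetical paths -- adjust to a real DKB export and a writable location
    df = read_csv('pynance/test_data/dkb_cash_sample.csv', SupportedCsvTypes.DKBCash)

    storage = Storage('pynance.sqlite')
    new_rows = storage.append_dataframe(df)  # intended: only rows not stored before, incl. 'id'
    all_rows = storage.load_dataframe()      # intended: full transactions table; still 'pass' here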