Compare commits
10 commits
2840a64215...5a8da108b9
Author | SHA1 | Date
---|---|---
 | 5a8da108b9 |
 | bd07154fbb |
 | 1473d24d6e |
 | 9129a4af6a |
 | 52ea12d4cf |
 | 45bc99e9ac |
 | 5784068904 |
 | 6deaacb11b |
 | 1a0ccd39b0 |
 | 9a0ff8da96 |
5 changed files with 55 additions and 11 deletions
@@ -10,7 +10,6 @@ import datetime
 from . import core
 from .. import ranges
 from .. import data
-from .. import errors as errormod
 
 from ..beancount_types import (
     Transaction,
@@ -18,6 +17,7 @@ from ..beancount_types import (
 
 METADATA_KEY = 'payroll-type'
 
+
 class _PayrollTypeHook(core._NormalizePostingMetadataHook):
     ACCOUNT: str
     VALUES_ENUM = core.MetadataEnum(METADATA_KEY, [])
@@ -74,6 +74,7 @@ class SalaryHook(_PayrollTypeHook):
         'CA:PTO:Earned',
         'CA:PTO:Taken',
         'US:403b:Employee',
+        'US:403b:Employee:Roth',
         'US:403b:Match',
         'US:General',
         'US:MA:Disability:PFL',
@@ -98,6 +99,7 @@ class TaxHook(_PayrollTypeHook):
         'CA:PP',
         'CA:WCB',
         'US:IL:Unemployment',
+        'US:TN:Unemployment',
         'US:MA:Health',
         'US:MA:Unemployment',
         'US:MA:WorkTrain',

@@ -54,6 +54,7 @@ class MetaTaxImplication(core._NormalizePostingMetadataHook):
         'USA-Corporation',
         'USA-Grantee',
         'W2',
+        'Asset-Sale'
     ]
     _ALIASES = dict(
         alias for value in _STDNAMES for alias in _make_aliases(value)

@@ -12,7 +12,6 @@ In the spirit of bc-reconcile-helper.plx (the original Perl code)
 Not implemented:
 - --report-group-regex
 - git branch selection from bean-query-goofy-daemon.plx
-
 """
 import argparse
 import csv

@@ -17,7 +17,7 @@ representing health insurance for multiple employees.
 
 Run it like this:
 
-$ statement_reconciler \
+$ statement-reconciler \
     --beancount-file 2021.beancount \
    --account Liabilities:CreditCard:AMEX \
    --csv-statement ~/svn/2021-09-10_AMEX_activity.csv \
@@ -270,6 +270,41 @@ def read_fr_csv(f: TextIO) -> list:
     )
 
 
+def validate_chase_csv(sample: str) -> None:
+    required_cols = {'Date', 'Description', 'Account', 'Transaction Type', 'Amount'}
+    reader = csv.DictReader(io.StringIO(sample))
+    if reader.fieldnames and not required_cols.issubset(reader.fieldnames):
+        sys.exit(
+            f"This Chase CSV doesn't seem to have the columns we're expecting, including: {', '.join(required_cols)}. Please use an unmodified statement direct from the institution."
+        )
+
+
+def standardize_chase_record(row: Dict, line: int) -> Dict:
+    """Turn a Chase CSV row into a standard dict format representing a transaction."""
+    return {
+        'date': datetime.datetime.strptime(row['Date'], '%m/%d/%y').date(),
+        'amount': -1 * parse_amount(row['Amount']),
+        # Descriptions have quite a lot of information, but the format is a little
+        # idiosyncratic. We'll need to see more examples before coming up with any ways
+        # to handle it in code. Others have used regular expressions to match the
+        # various transaction types:
+        # https://github.com/mtlynch/beancount-chase-bank/blob/master/beancount_chase/checking.py
+        # See also: https://awesome-beancount.com/
+        'payee': (row['Description'] or '').replace('ORIG CO NAME:', '')[:20],
+        'check_id': '',
+        'line': line,
+    }
+
+
+def read_chase_csv(f: TextIO) -> list:
+    reader = csv.DictReader(f)
+    # The reader.line_num is the source line number, not the spreadsheet row
+    # number due to multi-line records.
+    return sort_records(
+        [standardize_chase_record(row, i) for i, row in enumerate(reader, 2)]
+    )
+
+
 def standardize_beancount_record(row) -> Dict: # type: ignore[no-untyped-def]
     """Turn a Beancount query result row into a standard dict representing a transaction."""
     return {
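For reviewers who want to see the new Chase path end to end, here is a self-contained sketch of the same flow using only the standard library. The real code relies on the module's `parse_amount` and `sort_records` helpers, which are stood in for with `Decimal` and a plain `sort` here, and the sample CSV content is invented for illustration.

```python
import csv
import datetime
import io
from decimal import Decimal

# Invented sample following the column layout validate_chase_csv() checks for.
SAMPLE = """Date,Description,Account,Transaction Type,Amount
09/01/21,ORIG CO NAME:ACME WIDGETS PAYROLL,Checking,ACH_CREDIT,1200.00
09/03/21,CHECK 1234,Checking,CHECK_PAID,-55.10
"""

required_cols = {'Date', 'Description', 'Account', 'Transaction Type', 'Amount'}
reader = csv.DictReader(io.StringIO(SAMPLE))
assert reader.fieldnames and required_cols.issubset(reader.fieldnames)

records = []
for line, row in enumerate(reader, 2):  # row 1 of the file is the header
    records.append({
        'date': datetime.datetime.strptime(row['Date'], '%m/%d/%y').date(),
        'amount': -1 * Decimal(row['Amount']),  # stand-in for parse_amount()
        'payee': (row['Description'] or '').replace('ORIG CO NAME:', '')[:20],
        'check_id': '',
        'line': line,
    })

records.sort(key=lambda r: (r['date'], r['amount']))  # stand-in for sort_records()
print(records[0])
```

Running it prints the standardized dict for the first statement row.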
@@ -666,12 +701,13 @@ def parse_repo_relative_path(path: str) -> str:
     """
     if not os.path.exists(path):
         raise argparse.ArgumentTypeError(f'File {path} does not exist.')
-    repo = os.getenv('CONSERVANCY_REPOSITORY')
+    real_path = os.path.realpath(path)
+    repo = os.path.realpath(os.getenv('CONSERVANCY_REPOSITORY'))
     if not repo:
         raise argparse.ArgumentTypeError('$CONSERVANCY_REPOSITORY is not set.')
-    if not path.startswith(repo):
+    if not real_path.startswith(repo):
         raise argparse.ArgumentTypeError(
-            f'File {path} does not share a common prefix with $CONSERVANCY_REPOSITORY {repo}.'
+            f'File {real_path} does not share a common prefix with $CONSERVANCY_REPOSITORY {repo}.'
         )
     return path
 
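The switch to `os.path.realpath` makes the prefix comparison tolerant of symlinked checkouts: both the candidate file and `$CONSERVANCY_REPOSITORY` resolve to canonical paths before `startswith` runs. A minimal standalone sketch of that check, with invented directory names (only the environment variable name is real):

```python
import os

# Hypothetical layout for illustration; the paths do not need to exist.
os.environ['CONSERVANCY_REPOSITORY'] = '/home/user/books'

repo = os.getenv('CONSERVANCY_REPOSITORY')
if not repo:
    # Check the variable before resolving it, since realpath() needs a string.
    raise SystemExit('$CONSERVANCY_REPOSITORY is not set.')

# realpath() resolves symlinks and ".." segments, so a file reached through a
# symlinked checkout still compares equal to the canonical repository prefix.
repo_real = os.path.realpath(repo)
path_real = os.path.realpath('/home/user/books/2021/main.beancount')

if not path_real.startswith(repo_real):
    raise SystemExit(f'{path_real} is outside {repo_real}')
print('path is inside the repository')
```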
@@ -783,9 +819,14 @@ def main(
     if 'AMEX' in args.account:
         validate_csv = validate_amex_csv
         read_csv = read_amex_csv
-    else:
+    elif 'FR' in args.account:
         validate_csv = validate_fr_csv
         read_csv = read_fr_csv
+    elif 'Chase' in args.account:
+        validate_csv = validate_chase_csv
+        read_csv = read_chase_csv
+    else:
+        sys.exit("The account provided doesn't match one of AMEX, FR, or Chase.")
 
     with open(args.csv_statement) as f:
         sample = f.read(200)
@@ -834,7 +875,7 @@ def main(
         AND date >= {begin_date}
         AND date <= {end_date}"""
     _, result_rows = run_query(entries, options, query)
-    books_trans = sort_records([standardize_beancount_record(row) for row in result_rows])
+    books_trans = sort_records(standardize_beancount_record(row) for row in result_rows)
 
     # Apply two passes of matching, one for standard matches and one
     # for subset matches.

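The `books_trans` change passes a generator expression instead of building an intermediate list. Assuming `sort_records` ultimately hands its argument to `sorted()`, which accepts any iterable, the behavior is unchanged and one throwaway list is avoided. A tiny illustration with made-up rows:

```python
result_rows = [30, 10, 20]  # made-up stand-ins for Beancount query rows

def standardize_beancount_record(row):
    return {'amount': row}

# sorted() consumes any iterable, so a generator expression behaves the same
# as a list comprehension here, without materializing the intermediate list.
books_trans = sorted(
    (standardize_beancount_record(row) for row in result_rows),
    key=lambda t: t['amount'],
)
print(books_trans)
```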
@@ -1,6 +1,6 @@
 [metadata]
 name = conservancy_beancount
-version = 1.19.8
+version = 1.20.0
 author = Software Freedom Conservancy
 author_email = info@sfconservancy.org
 description = Plugin, library, and reports for reading Conservancy’s books
@@ -32,7 +32,8 @@ warn_unused_configs = True
 include_package_data = True
 install_requires =
     babel>=2.6
-    beancount>=2.2
+    beancount>=2.2,<3.0.0
+    colorama
     GitPython>=2.0
     odfpy>=1.4.0,!=1.4.1
     pdfminer.six>=20200101
@@ -105,4 +106,4 @@ filterwarnings =
     ignore::DeprecationWarning:^socks$
 
 [tox:tox]
-envlist = py36,py37
+envlist = py310