Having a "table" in memory that needs lookups, sorting, and arbitrary aggregation really does call out for SQL. You said you tried SQLite, but did you realize that SQLite can use an in-memory-only database?
connection = sqlite3.connect(':memory:')
Then you can create/drop/query/update tables in memory with all the functionality of SQLite, and no files are left over when you're done. And as of Python 2.5, sqlite3 is in the standard library, so it's not really "overkill" IMO.
Here is a sample of how one might create and populate the database:
import csv
import sqlite3

# ':memory:' keeps the whole database in RAM -- nothing is written to disk.
db = sqlite3.connect(':memory:')

def init_db(cur):
    cur.execute('''CREATE TABLE foo (
        Row INTEGER,
        Name TEXT,
        Year INTEGER,
        Priority INTEGER)''')

def populate_db(cur, csv_fp):
    rdr = csv.reader(csv_fp)
    # executemany() consumes the reader row by row, one INSERT per CSV row.
    cur.executemany('''
        INSERT INTO foo (Row, Name, Year, Priority)
        VALUES (?,?,?,?)''', rdr)

cur = db.cursor()
init_db(cur)
populate_db(cur, open('my_csv_input_file.csv'))
db.commit()
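From there, lookups, sorting, and arbitrary aggregation are each a single SQL statement. A quick sketch, reusing the cur cursor and foo table from the sample above (the specific queries are only illustrations):

# Look up one row by name (parameterized query).
cur.execute('SELECT * FROM foo WHERE Name = ?', ('Aardvark',))
print cur.fetchone()

# Sort by Priority, then Year.
cur.execute('SELECT * FROM foo ORDER BY Priority, Year')
for row in cur.fetchall():
    print row

# Arbitrary aggregation, e.g. counting rows per Year.
cur.execute('SELECT Year, COUNT(*) FROM foo GROUP BY Year')
print cur.fetchall()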
If you'd really prefer not to use SQL, you should probably use a list of dictionaries:
lod = []  # "list of dicts"

def populate_lod(lod, csv_fp):
    # DictReader leaves every field as a string.
    rdr = csv.DictReader(csv_fp, ['Row', 'Name', 'Year', 'Priority'])
    lod.extend(rdr)

def query_lod(lod, filter=None, sort_keys=None):
    if filter is not None:
        lod = (r for r in lod if filter(r))
    if sort_keys is not None:
        lod = sorted(lod, key=lambda r: [r[k] for k in sort_keys])
    else:
        lod = list(lod)
    return lod

def lookup_lod(lod, **kw):
    # Return the first row whose fields all match the keyword arguments.
    for row in lod:
        for k, v in kw.iteritems():
            if row[k] != str(v):
                break
        else:
            return row
    return None
Testing then yields:
>>> from pprint import pprint
>>> lod = []
>>> populate_lod(lod, csv_fp)
>>>
>>> pprint(lookup_lod(lod, Row=1))
{'Name': 'Cat', 'Priority': '1', 'Row': '1', 'Year': '1998'}
>>> pprint(lookup_lod(lod, Name='Aardvark'))
{'Name': 'Aardvark', 'Priority': '1', 'Row': '4', 'Year': '2000'}
>>> pprint(query_lod(lod, sort_keys=('Priority', 'Year')))
[{'Name': 'Cat', 'Priority': '1', 'Row': '1', 'Year': '1998'},
{'Name': 'Dog', 'Priority': '1', 'Row': '3', 'Year': '1999'},
{'Name': 'Aardvark', 'Priority': '1', 'Row': '4', 'Year': '2000'},
{'Name': 'Wallaby', 'Priority': '1', 'Row': '5', 'Year': '2000'},
{'Name': 'Fish', 'Priority': '2', 'Row': '2', 'Year': '1998'},
{'Name': 'Zebra', 'Priority': '3', 'Row': '6', 'Year': '2001'}]
>>> pprint(query_lod(lod, sort_keys=('Year', 'Priority')))
[{'Name': 'Cat', 'Priority': '1', 'Row': '1', 'Year': '1998'},
{'Name': 'Fish', 'Priority': '2', 'Row': '2', 'Year': '1998'},
{'Name': 'Dog', 'Priority': '1', 'Row': '3', 'Year': '1999'},
{'Name': 'Aardvark', 'Priority': '1', 'Row': '4', 'Year': '2000'},
{'Name': 'Wallaby', 'Priority': '1', 'Row': '5', 'Year': '2000'},
{'Name': 'Zebra', 'Priority': '3', 'Row': '6', 'Year': '2001'}]
>>> print len(query_lod(lod, lambda r:1997 <= int(r['Year']) <= 2002))
6
>>> print len(query_lod(lod, lambda r:int(r['Year'])==1998 and int(r['Priority']) > 2))
0
Personally, I prefer the SQLite version since it preserves your types better (no extra conversion code in Python) and easily grows to accommodate future requirements. But then again, I'm quite comfortable with SQL, so YMMV.
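To make the type point concrete: SQLite's INTEGER columns hand you ints back, while csv.DictReader keeps the raw CSV strings, which is why the query_lod examples above need int(r['Year']). A small sketch, assuming the foo table and lod list built earlier:

# SQLite applies INTEGER affinity on insert, so numeric columns come back as ints.
cur.execute('SELECT Year, Priority FROM foo WHERE Name = ?', ('Cat',))
print cur.fetchone()                        # -> (1998, 1): ints, not strings

# The list-of-dicts version keeps the raw CSV strings.
print lookup_lod(lod, Name='Cat')['Year']   # -> '1998': a string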