==> beanquery-master/.github/workflows/checks.yaml <==
name: checks
on:
- push
- pull_request
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- run: python -m pip install ruff
- run: ruff check beanquery/
coverage:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- run: pip install -r requirements.txt coverage
- name: Run coverage
        # Check test coverage. Instead of checking project-wide coverage
        # of all the tests, we check that each module is exhaustively
        # tested by its dedicated unit tests.
run: |
set -x
echo '{
"beanquery/parser/*": "beanquery/parser_test.py",
"beanquery/query_render.py": "beanquery/query_render_test.py"
}' | jq -rc 'to_entries | .[] | (.key + "=" + .value)' | while IFS='=' read src test
do
python -m coverage run --branch --include "$src" --omit beanquery/parser/parser.py -m unittest "$test"
python -m coverage report --fail-under=100 -m
python -m coverage erase
done
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- run: pip install -r requirements.txt wheel build
- run: python -m build --no-isolation
- run: python -m pip install dist/beanquery-*.whl
==> beanquery-master/.github/workflows/docs.yml <==
name: docs
on:
push:
pull_request:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- uses: actions/checkout@v4
- run: python -m pip install .[docs]
- run: python -m sphinx -W -b html docs/ build/html/
- uses: actions/upload-pages-artifact@v3
with:
path: build/html
deploy:
needs: build
permissions:
pages: write
id-token: write
environment:
name: github-pages
runs-on: ubuntu-latest
if: github.ref == 'refs/heads/master'
steps:
- uses: actions/deploy-pages@v4
==> beanquery-master/.github/workflows/release.yaml <==
name: release
on:
push:
tags:
- 'v[0-9]*'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- run: python -m pip install build
- run: python -m build
- uses: actions/upload-artifact@v4
with:
path: dist/*
upload:
needs: build
runs-on: ubuntu-latest
environment: upload
permissions:
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
merge-multiple: true
path: dist
- uses: pypa/gh-action-pypi-publish@release/v1
with:
attestations: false
==> beanquery-master/.github/workflows/test.yaml <==
name: test
on:
- push
- pull_request
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python:
- '3.8'
- '3.9'
- '3.10'
- '3.11'
- '3.12'
- '3.13'
beancount:
- '~= 2.3.6'
- '~= 3.0.0'
- '@ git+https://github.com/beancount/beancount.git'
exclude:
- python: '3.8'
beancount: '@ git+https://github.com/beancount/beancount.git'
- python: '3.9'
beancount: '@ git+https://github.com/beancount/beancount.git'
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python }}
allow-prereleases: true
- run: pip install 'beancount ${{ matrix.beancount }}'
- run: pip install -r requirements.txt
- run: python -m unittest discover -p '*_test.py'
==> beanquery-master/.gitignore <==
__pycache__/
==> beanquery-master/CHANGES.rst <==
Version 0.1 (unreleased)
------------------------
- The ``HAVING`` clause for aggregate queries is now supported.
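
  For example, a query along these lines now filters the aggregated
  rows::

      SELECT account, count(account) GROUP BY account HAVING count(account) > 10
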
- The ``empty()`` BQL function has been added: it determines whether
  an Inventory object, as returned by the ``sum()`` aggregate
  function, is empty.
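
  For example, a query of this shape drops empty groups::

      SELECT account, sum(position) GROUP BY account
      HAVING NOT empty(sum(position))
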
- Added the ``round()`` BQL function.
- ``NULL`` values in the ``SORT BY`` clause are now always considered
  to be smaller than any other value. This may result in rows being
  returned in a slightly different order.
- It is now possible to specify the direction of the ordering for each
  column in the ``SORT BY`` clause. This brings BQL closer to the SQL
  specification, but queries written with the old behaviour in mind
will return rows in a different order. The query::
SELECT date, narration ORDER BY date, narration DESC
used to return rows in descending order by both ``date`` and
``narration`` while now it would order the rows ascending by
``date`` and descending by ``narration``. To recover the old
behavior, the query should be written::
SELECT date, narration ORDER BY date DESC, narration DESC
- Type casting functions ``int()``, ``decimal()``, ``str()``,
``date()`` have been added. These are mostly useful to convert the
generic ``object`` type returned by the metadata retrieval functions
but can also be used to convert between types. If the conversion
fails, ``NULL`` is returned.
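
  For example, assuming a ``rate`` metadata entry holding a number::

      SELECT narration, decimal(meta('rate'))
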
- The ``str()`` BQL function used to return a string representation of
its argument using the Python :py:func:`repr()` function. This
clashes with the use of ``str()`` as a type casting function. The
function is renamed ``repr()``.
- The ``date()`` BQL function used to extract a date from string
arguments with a very relaxed parser. This clashes with the use of
``date()`` as a type casting function. The function is renamed
``parse_date()``. Another form of ``parse_date()`` that accepts the
date format as second argument has been added.
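
  For example, assuming ``strftime``-style format codes::

      SELECT parse_date('2023-03-01'), parse_date('01/03/2023', '%d/%m/%Y')
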
- The ``getitem()`` BQL function return type has been changed from a
  string to a generic ``object`` to match the return type of functions
retrieving entries from metadata dictionaries. The old behavior can
be obtained with ``str(getitem(x, key))``.
==> beanquery-master/LICENSE <==
                    GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
  <signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
==> beanquery-master/README.rst <==
beanquery: Customizable lightweight SQL query tool
==================================================
beanquery is a customizable and extensible lightweight SQL query tool
that works on tabular data, including `Beancount`__ ledger data.
__ https://beancount.github.io/
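
Since beanquery implements the Python DB-API, it can also be used
programmatically. A minimal sketch (the ``beancount:`` DSN scheme and
the ledger file name here are illustrative assumptions)::

    import beanquery

    conn = beanquery.connect('beancount:ledger.beancount')
    curs = conn.execute('SELECT account, sum(position) GROUP BY account')
    for row in curs.fetchall():
        print(row)
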
==> beanquery-master/beanquery/__init__.py <==
import importlib
from urllib.parse import urlparse
from . import parser
from . import compiler
from . import tables
from .compiler import CompilationError
from .cursor import Cursor, Column
from .errors import Warning, Error, InterfaceError, DatabaseError, DataError, OperationalError
from .errors import IntegrityError, InternalError, ProgrammingError, NotSupportedError
from .parser import ParseError
__version__ = '0.1.dev1'
# DB-API compliance
apilevel = '2.0'
threadsafety = 2
paramstyle = 'pyformat'
def connect(dsn, **kwargs):
return Connection(dsn, **kwargs)
class Connection:
def __init__(self, dsn='', **kwargs):
self.tables = {'': tables.NullTable()}
self.options = {}
self.errors = []
if dsn:
self.attach(dsn, **kwargs)
def attach(self, dsn, **kwargs):
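        # The DSN URI scheme selects the data source: the corresponding
        # ``beanquery.sources.{scheme}`` module is imported and its
        # ``attach()`` function registers the source's tables on this
        # connection.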
scheme = urlparse(dsn).scheme
source = importlib.import_module(f'beanquery.sources.{scheme}')
source.attach(self, dsn, **kwargs)
def close(self):
# Required by the DB-API.
pass
def parse(self, query):
return parser.parse(query)
def compile(self, query):
return compiler.compile(self, query)
def execute(self, query, params=None):
return self.cursor().execute(query, params)
def cursor(self):
return Cursor(self)
__all__ = [
'Column',
'CompilationError',
'Connection',
'Cursor',
'DataError',
'DatabaseError',
'Error',
'IntegrityError',
'InterfaceError',
'InternalError',
'NotSupportedError',
'OperationalError',
'ParseError',
'ProgrammingError',
'Warning',
'apilevel',
    'connect',
'paramstyle',
'threadsafety',
]
==> beanquery-master/beanquery/__main__.py <==
from beanquery import shell
if __name__ == '__main__':
shell.main()
==> beanquery-master/beanquery/compiler.py <==
import collections.abc
import typing
from decimal import Decimal
from functools import singledispatchmethod
from typing import Optional, Sequence, Mapping, Union
from . import types
from . import parser
from .errors import ProgrammingError
from .parser import ast
from .query_compile import (
EvalAggregator,
EvalAnd,
EvalAll,
EvalAny,
EvalCoalesce,
EvalColumn,
EvalConstant,
EvalGetItem,
EvalGetter,
EvalOr,
EvalPivot,
EvalQuery,
EvalConstantSubquery1D,
EvalRow,
EvalTarget,
FUNCTIONS,
OPERATORS,
SubqueryTable,
)
# A global constant which sets whether we support inferred/implicit group-by
# semantics.
SUPPORT_IMPLICIT_GROUPBY = True
class CompilationError(ProgrammingError):
def __init__(self, message, node=None):
super().__init__(message)
self.parseinfo = node.parseinfo if node is not None else None
class Compiler:
def __init__(self, context):
self.context = context
self.stack = [context.tables.get('postings')]
@property
def table(self):
return self.stack[-1]
@table.setter
def table(self, value):
self.stack[-1] = value
def compile(self, query, parameters=None):
"""Compile an AST into an executable statement."""
self.parameters = parameters
placeholders = [node for node in query.walk() if isinstance(node, ast.Placeholder)]
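        # With ``paramstyle = 'pyformat'``, named placeholders look like
        # ``%(name)s`` and positional placeholders like ``%s``; the two
        # styles cannot be mixed in a single query.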
if placeholders:
names = {placeholder.name for placeholder in placeholders}
if all(names):
if not isinstance(parameters, Mapping):
raise TypeError('query parameters should be a mapping when using named placeholders')
if names - parameters.keys():
missing = ', '.join(sorted(names - parameters.keys()))
raise ProgrammingError(f'query parameter missing: {missing}')
elif not any(names):
if not isinstance(parameters, Sequence):
raise TypeError('query parameters should be a sequence when using positional placeholders')
if len(placeholders) != len(parameters):
raise ProgrammingError(
f'the query has {len(placeholders)} placeholders but {len(parameters)} parameters were passed')
for i, placeholder in enumerate(sorted(placeholders, key=lambda node: node.parseinfo.pos)):
placeholder.name = i
else:
raise ProgrammingError('positional and named parameters cannot be mixed')
return self._compile(query)
@singledispatchmethod
def _compile(self, node: Optional[ast.Node]):
if node is None:
return None
raise NotImplementedError
@_compile.register
def _select(self, node: ast.Select):
self.stack.append(self.table)
# Compile the FROM clause.
c_from_expr = self._compile_from(node.from_clause)
# Compile the targets.
c_targets = self._compile_targets(node.targets)
# Bind the WHERE expression to the execution environment.
c_where = self._compile(node.where_clause)
# Check that the FROM clause does not contain aggregates. This
# should never trigger if the compilation environment does not
# contain any aggregate.
if c_where is not None and is_aggregate(c_where):
raise CompilationError('aggregates are not allowed in WHERE clause')
# Combine FROM and WHERE clauses
if c_from_expr is not None:
c_where = c_from_expr if c_where is None else EvalAnd([c_from_expr, c_where])
# Process the GROUP-BY clause.
new_targets, group_indexes, having_index = self._compile_group_by(node.group_by, c_targets)
c_targets.extend(new_targets)
# Process the ORDER-BY clause.
new_targets, order_spec = self._compile_order_by(node.order_by, c_targets)
c_targets.extend(new_targets)
# If this is an aggregate query (it groups, see list of indexes), check that
# the set of non-aggregates match exactly the group indexes. This should
# always be the case at this point, because we have added all the necessary
# targets to the list of group-by expressions and should have resolved all
# the indexes.
if group_indexes is not None:
non_aggregate_indexes = {index for index, c_target in enumerate(c_targets)
if not c_target.is_aggregate}
if non_aggregate_indexes != set(group_indexes):
missing_names = ['"{}"'.format(c_targets[index].name)
for index in non_aggregate_indexes - set(group_indexes)]
raise CompilationError(
'all non-aggregates must be covered by GROUP-BY clause in aggregate query: '
'the following targets are missing: {}'.format(','.join(missing_names)))
query = EvalQuery(self.table,
c_targets,
c_where,
group_indexes,
having_index,
order_spec,
node.limit,
node.distinct)
        # Pop the table pushed at the beginning of the SELECT
        # compilation, so that the stack stays balanced also when a
        # PIVOT BY clause is present.
        self.stack.pop()
        pivots = self._compile_pivot_by(node.pivot_by, c_targets, group_indexes)
        if pivots:
            return EvalPivot(query, pivots)
        return query
def _compile_from(self, node):
if node is None:
return None
# Subquery.
if isinstance(node, ast.Select):
self.table = SubqueryTable(self._compile(node))
return None
# Table reference.
if isinstance(node, ast.Table):
self.table = self.context.tables.get(node.name)
if self.table is None:
raise CompilationError(f'table "{node.name}" does not exist', node)
return None
# FROM expression.
if isinstance(node, ast.From):
c_expression = self._compile(node.expression)
# Check that the FROM clause does not contain aggregates.
if c_expression is not None and is_aggregate(c_expression):
raise CompilationError('aggregates are not allowed in FROM clause')
if node.open and node.close and node.open > node.close:
raise CompilationError('CLOSE date must follow OPEN date')
# Apply OPEN, CLOSE, and CLEAR clauses.
self.table = self.table.update(open=node.open, close=node.close, clear=node.clear)
return c_expression
raise NotImplementedError
def _compile_targets(self, targets):
"""Compile the targets and check for their validity. Process wildcard.
Args:
targets: A list of target expressions from the parser.
Returns:
A list of compiled target expressions with resolved names.
"""
# Bind the targets expressions to the execution context.
if isinstance(targets, ast.Asterisk):
# Insert the full list of available columns.
targets = [ast.Target(ast.Column(name), None)
for name in self.table.wildcard_columns]
# Compile targets.
c_targets = []
for target in targets:
c_expr = self._compile(target.expression)
name = get_target_name(target)
c_targets.append(EvalTarget(c_expr, name, is_aggregate(c_expr)))
columns, aggregates = get_columns_and_aggregates(c_expr)
# Check for mixed aggregates and non-aggregates.
if columns and aggregates:
raise CompilationError('mixed aggregates and non-aggregates are not allowed')
# Check for aggregates of aggregates.
for aggregate in aggregates:
for child in aggregate.childnodes():
if is_aggregate(child):
raise CompilationError('aggregates of aggregates are not allowed')
return c_targets
def _compile_order_by(self, order_by, c_targets):
"""Process an order-by clause.
Args:
order_by: A OrderBy instance as provided by the parser.
c_targets: A list of compiled target expressions.
Returns:
A tuple of
new_targets: A list of new compiled target nodes.
order_spec: A list of (integer indexes, sort order) tuples.
"""
if not order_by:
return [], None
# Compile order-by expressions and resolve them to their targets if
# possible. A ORDER-BY column may be one of the following:
#
# * A reference to a target by name.
# * A reference to a target by index (starting at one).
# * A new expression, aggregate or not.
#
# References by name are converted to indexes. New expressions are
# inserted into the list of targets as invisible targets.
new_targets = c_targets[:]
c_target_expressions = [c_target.c_expr for c_target in c_targets]
targets_name_map = {target.name: idx for idx, target in enumerate(c_targets) if target.name is not None}
# Only targets appearing in the SELECT targets list can be
# referenced by index. These are guaranteed to have a valid name.
n_targets = len(targets_name_map)
order_spec = []
for spec in order_by:
column = spec.column
descending = spec.ordering
index = None
# Process target references by index.
if isinstance(column, int):
index = column - 1
if not 0 <= index < n_targets:
raise CompilationError(f'invalid ORDER-BY column index {column}')
else:
# Process target references by name. These will be parsed as
# simple Column expressions. If they refer to a target name, we
# resolve them.
if isinstance(column, ast.Column):
name = column.name
index = targets_name_map.get(name, None)
# Otherwise we compile the expression and add it to the list of
# targets to evaluate and index into that new target.
if index is None:
c_expr = self._compile(column)
# Attempt to reconcile the expression with one of the existing
# target expressions.
try:
index = c_target_expressions.index(c_expr)
except ValueError:
# Add the new target. 'None' for the target name implies it
# should be invisible, not to be rendered.
index = len(new_targets)
new_targets.append(EvalTarget(c_expr, None, is_aggregate(c_expr)))
c_target_expressions.append(c_expr)
assert index is not None, "Internal error, could not index order-by reference."
order_spec.append((index, descending))
return new_targets[len(c_targets):], order_spec
def _compile_pivot_by(self, pivot_by, targets, group_indexes):
"""Compiles a PIVOT BY clause.
Resolve and validate columns references in the PIVOT BY clause.
The PIVOT BY clause accepts two name od index references to
columns in the SELECT targets list. The second columns should be a
GROUP BY column so that the values of the pivot column are unique.
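
        For example, a hypothetical query::

            SELECT account, currency, sum(position) PIVOT BY account, currency

        references both PIVOT BY columns by name; ``PIVOT BY 1, 2``
        would reference them by position instead.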
"""
if pivot_by is None:
return None
indexes = []
names = {target.name: index for index, target in enumerate(targets)}
for column in pivot_by.columns:
# Process target references by index.
if isinstance(column, int):
index = column - 1
if not 0 <= index < len(targets):
raise CompilationError(f'invalid PIVOT BY column index {column}')
indexes.append(index)
continue
# Process target references by name.
if isinstance(column, ast.Column):
index = names.get(column.name, None)
if index is None:
raise CompilationError(f'PIVOT BY column {column!r} is not in the targets list')
indexes.append(index)
continue
# Not reached.
raise RuntimeError
# Sanity checks.
if indexes[0] == indexes[1]:
raise CompilationError('the two PIVOT BY columns cannot be the same column')
if indexes[1] not in group_indexes:
raise CompilationError('the second PIVOT BY column must be a GROUP BY column')
return indexes
def _compile_group_by(self, group_by, c_targets):
"""Process a group-by clause.
Args:
group_by: A GroupBy instance as provided by the parser.
c_targets: A list of compiled target expressions.
Returns:
A tuple of
new_targets: A list of new compiled target nodes.
group_indexes: If the query is an aggregate query, a list of integer
indexes to be used for processing grouping. Note that this list may be
empty (in the case of targets with only aggregates). On the other hand,
              if this is not an aggregated query, this is set to None, so be
              careful to distinguish the empty list from None.
"""
new_targets = c_targets[:]
c_target_expressions = [c_target.c_expr for c_target in c_targets]
group_indexes = []
having_index = None
if group_by:
assert group_by.columns, "Internal error with GROUP-BY parsing"
# Compile group-by expressions and resolve them to their targets if
# possible. A GROUP-BY column may be one of the following:
#
# * A reference to a target by name.
# * A reference to a target by index (starting at one).
# * A new, non-aggregate expression.
#
# References by name are converted to indexes. New expressions are
# inserted into the list of targets as invisible targets.
targets_name_map = {target.name: index for index, target in enumerate(c_targets)}
for column in group_by.columns:
index = None
# Process target references by index.
if isinstance(column, int):
index = column - 1
if not 0 <= index < len(c_targets):
raise CompilationError(f'invalid GROUP-BY column index {column}')
else:
# Process target references by name. These will be parsed as
# simple Column expressions. If they refer to a target name, we
# resolve them.
if isinstance(column, ast.Column):
name = column.name
index = targets_name_map.get(name, None)
# Otherwise we compile the expression and add it to the list of
# targets to evaluate and index into that new target.
if index is None:
c_expr = self._compile(column)
# Check if the new expression is an aggregate.
aggregate = is_aggregate(c_expr)
if aggregate:
raise CompilationError(f'GROUP-BY expressions may not be aggregates: "{column}"')
# Attempt to reconcile the expression with one of the existing
# target expressions.
try:
index = c_target_expressions.index(c_expr)
except ValueError:
# Add the new target. 'None' for the target name implies it
# should be invisible, not to be rendered.
index = len(new_targets)
new_targets.append(EvalTarget(c_expr, None, aggregate))
c_target_expressions.append(c_expr)
assert index is not None, "Internal error, could not index group-by reference."
group_indexes.append(index)
# Check that the group-by column references a non-aggregate.
c_expr = new_targets[index].c_expr
if is_aggregate(c_expr):
raise CompilationError(f'GROUP-BY expressions may not reference aggregates: "{column}"')
# Check that the group-by column has a supported hashable type.
if not issubclass(c_expr.dtype, collections.abc.Hashable):
raise CompilationError(f'GROUP-BY a non-hashable type is not supported: "{column}"')
# Compile HAVING clause.
if group_by.having is not None:
c_expr = self._compile(group_by.having)
if not is_aggregate(c_expr):
raise CompilationError('the HAVING clause must be an aggregate expression')
having_index = len(new_targets)
new_targets.append(EvalTarget(c_expr, None, True))
c_target_expressions.append(c_expr)
else:
# If it does not have a GROUP-BY clause...
aggregate_bools = [c_target.is_aggregate for c_target in c_targets]
if any(aggregate_bools):
# If the query is an aggregate query, check that all the targets are
# aggregates.
if all(aggregate_bools):
                    # FIXME: should we really be checking for the empty
                    # list or is checking for a false value enough?
assert group_indexes == []
elif SUPPORT_IMPLICIT_GROUPBY:
# If some of the targets aren't aggregates, automatically infer
# that they are to be implicit group by targets. This makes for
# a much more convenient syntax for our lightweight SQL, where
# grouping is optional.
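                    # For example, ``SELECT account, sum(position)`` is
                    # implicitly grouped by ``account``.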
group_indexes = [
index for index, c_target in enumerate(c_targets)
if not c_target.is_aggregate]
else:
raise CompilationError('aggregate query without a GROUP-BY should have only aggregates')
else:
# This is not an aggregate query; don't set group_indexes to
# anything useful, we won't need it.
group_indexes = None
return new_targets[len(c_targets):], group_indexes, having_index
@_compile.register
def _column(self, node: ast.Column):
column = self.table.columns.get(node.name)
if column is not None:
return column
raise CompilationError(f'column "{node.name}" not found in table "{self.table.name}"', node)
@_compile.register
def _or(self, node: ast.Or):
return EvalOr([self._compile(arg) for arg in node.args])
@_compile.register
def _and(self, node: ast.And):
return EvalAnd([self._compile(arg) for arg in node.args])
_OPERATORS = {
'<': ast.Less,
'<=': ast.LessEq,
'>': ast.Greater,
'>=': ast.GreaterEq,
'=': ast.Equal,
'!=': ast.NotEqual,
'~': ast.Match,
'!~': ast.NotMatch,
'?~': ast.Matches,
}
# dispatching on an Union is supported only starting with Python 3.11
@_compile.register(ast.All)
@_compile.register(ast.Any)
def _all(self, node):
right = self._compile(node.right)
if isinstance(right, EvalQuery):
if len(right.columns) != 1:
raise CompilationError('subquery has too many columns', node.right)
right = EvalConstantSubquery1D(right)
right_dtype = typing.get_origin(right.dtype) or right.dtype
if right_dtype not in {list, set}:
raise CompilationError(f'not a list or set but {right_dtype}', node.right)
args = typing.get_args(right.dtype)
if args:
assert len(args) == 1
right_element_dtype = args[0]
else:
right_element_dtype = object
left = self._compile(node.left)
        # Look up the operator implementation and check typing.
op = self._OPERATORS[node.op]
for func in OPERATORS[op]:
if func.__intypes__ == [right_element_dtype, left.dtype]:
break
else:
raise CompilationError(
f'operator "{op.__name__.lower()}('
f'{left.dtype.__name__}, {right_element_dtype.__name__})" not supported', node)
        # We need to instantiate the operator implementation to get at
        # the underlying function.
operator = func(None, None).operator
cls = EvalAll if type(node) is ast.All else EvalAny
return cls(operator, left, right)
@_compile.register
def _function(self, node: ast.Function):
operands = [self._compile(operand) for operand in node.operands]
# ``row(*)`` is parsed like a function call but does something special
if node.fname == 'row' and len(operands) == 1 and operands[0].dtype == types.Asterisk:
return EvalRow()
# ``coalesce()`` is parsed like a function call but it does
# not really fit our model for function evaluation, therefore
        # it gets special treatment here.
if node.fname == 'coalesce':
for operand in operands:
if operand.dtype != operands[0].dtype:
dtypes = ', '.join(operand.dtype.__name__ for operand in operands)
raise CompilationError(f'coalesce() function arguments must have uniform type, found: {dtypes}', node)
return EvalCoalesce(operands)
function = types.function_lookup(FUNCTIONS, node.fname, operands)
if function is None:
sig = '{}({})'.format(node.fname, ', '.join(f'{operand.dtype.__name__.lower()}' for operand in operands))
raise CompilationError(f'no function matches "{sig}" name and argument types', node)
# Replace ``meta(key)`` with ``meta[key]``.
if node.fname == 'meta':
key = node.operands[0]
node = ast.Function('getitem', [ast.Column('meta', parseinfo=node.parseinfo), key])
return self._compile(node)
# Replace ``entry_meta(key)`` with ``entry.meta[key]``.
if node.fname == 'entry_meta':
key = node.operands[0]
node = ast.Function('getitem', [ast.Attribute(ast.Column('entry', parseinfo=node.parseinfo), 'meta'), key])
return self._compile(node)
# Replace ``any_meta(key)`` with ``getitem(meta, key, entry.meta[key])``.
if node.fname == 'any_meta':
key = node.operands[0]
node = ast.Function('getitem', [ast.Column('meta', parseinfo=node.parseinfo), key, ast.Function('getitem', [
ast.Attribute(ast.Column('entry', parseinfo=node.parseinfo), 'meta'), key])])
return self._compile(node)
# Replace ``has_account(regexp)`` with ``('(?i)' + regexp) ~? any (accounts)``.
if node.fname == 'has_account':
node = ast.Any(ast.Add(ast.Constant('(?i)'), node.operands[0]), '?~', ast.Column('accounts'))
return self._compile(node)
function = function(self.context, operands)
# Constants folding.
if all(isinstance(operand, EvalConstant) for operand in operands) and function.pure:
return EvalConstant(function(None), function.dtype)
return function
@_compile.register
def _subscript(self, node: ast.Subscript):
operand = self._compile(node.operand)
if issubclass(operand.dtype, dict):
return EvalGetItem(operand, node.key)
raise CompilationError('column type is not subscriptable', node)
@_compile.register
def _attribute(self, node: ast.Attribute):
operand = self._compile(node.operand)
dtype = types.ALIASES.get(operand.dtype, operand.dtype)
if issubclass(dtype, types.Structure):
getter = dtype.columns.get(node.name)
if getter is None:
raise CompilationError(f'structured type has no attribute "{node.name}"', node)
return EvalGetter(operand, getter, getter.dtype)
raise CompilationError('column type is not structured', node)
@_compile.register
def _unaryop(self, node: ast.UnaryOp):
operand = self._compile(node.operand)
function = types.function_lookup(OPERATORS, type(node), [operand])
if function is None:
raise CompilationError(
f'operator "{type(node).__name__.lower()}({types.name(operand.dtype)})" not supported', node)
function = function(operand)
# Constants folding.
if isinstance(operand, EvalConstant):
return EvalConstant(function(None), function.dtype)
return function
@_compile.register
def _between(self, node: ast.Between):
operand = self._compile(node.operand)
lower = self._compile(node.lower)
upper = self._compile(node.upper)
intypes = [operand.dtype, lower.dtype, upper.dtype]
for candidate in OPERATORS[type(node)]:
if candidate.__intypes__ == intypes:
func = candidate(operand, lower, upper)
return func
raise CompilationError(
f'operator "{types.name(operand.dtype)} BETWEEN {types.name(lower.dtype)} '
f'AND {types.name(upper.dtype)}" not supported', node)
@_compile.register(ast.In)
@_compile.register(ast.NotIn)
def _inop(self, node: Union[ast.In, ast.NotIn]):
left = self._compile(node.left)
right = self._compile(node.right)
if isinstance(right, EvalQuery):
if len(right.columns) != 1:
raise CompilationError('subquery has too many columns', node.right)
right = EvalConstantSubquery1D(right)
op = OPERATORS[type(node)][0]
return op(left, right)
@_compile.register
def _binaryop(self, node: ast.BinaryOp):
left = self._compile(node.left)
right = self._compile(node.right)
candidates = OPERATORS[type(node)]
while True:
intypes = [left.dtype, right.dtype]
for op in candidates:
if op.__intypes__ == intypes:
function = op(left, right)
# Constants folding.
if isinstance(left, EvalConstant) and isinstance(right, EvalConstant):
return EvalConstant(function(None), function.dtype)
return function
# Implement type inference when one of the operands is not strongly typed.
if left.dtype is object and right.dtype is not object:
target = right.dtype
if target is int:
# The Beancount parser does not emit int typed
# values, thus casting to int is only going to
                    # lose information. Promote to decimal.
target = Decimal
name = types.MAP.get(target)
if name is None:
break
left = types.function_lookup(FUNCTIONS, name, [left])(self.context, [left])
continue
if right.dtype is object and left.dtype is not object:
target = left.dtype
if target is int:
# The Beancount parser does not emit int typed
# values, thus casting to int is only going to
                    # lose information. Promote to decimal.
target = Decimal
name = types.MAP.get(target)
if name is None:
break
right = types.function_lookup(FUNCTIONS, name, [right])(self.context, [right])
continue
# Failure.
break
raise CompilationError(
f'operator "{type(node).__name__.lower()}('
f'{types.name(left.dtype)}, {types.name(right.dtype)})" not supported', node)
@_compile.register
def _constant(self, node: ast.Constant):
return EvalConstant(node.value)
@_compile.register
def _placeholder(self, node: ast.Placeholder):
return EvalConstant(self.parameters[node.name])
@_compile.register
def _asterisk(self, node: ast.Asterisk):
return EvalConstant(None, dtype=types.Asterisk)
@_compile.register
def _balances(self, node: ast.Balances):
return self._compile(transform_balances(node))
@_compile.register
def _journal(self, node: ast.Journal):
return self._compile(transform_journal(node))
@_compile.register
def _print(self, node: ast.Print):
self.table = self.context.tables.get('entries')
expr = self._compile_from(node.from_clause)
targets = [EvalTarget(EvalRow(), 'ROW(*)', False)]
return EvalQuery(self.table, targets, expr, None, None, None, None, False)
def transform_journal(journal):
"""Translate a Journal entry into an uncompiled Select statement.
Args:
journal: An instance of a Journal object.
Returns:
An instance of an uncompiled Select object.
"""
cooked_select = parser.parse("""
SELECT
date,
flag,
MAXWIDTH(payee, 48),
MAXWIDTH(narration, 80),
account,
{summary_func}(position),
{summary_func}(balance)
{where}
""".format(where=('WHERE account ~ "{}"'.format(journal.account)
if journal.account
else ''),
summary_func=journal.summary_func or ''))
return ast.Select(cooked_select.targets,
journal.from_clause,
cooked_select.where_clause,
None, None, None, None, None)
def transform_balances(balances):
"""Translate a Balances entry into an uncompiled Select statement.
Args:
balances: An instance of a Balance object.
Returns:
An instance of an uncompiled Select object.
"""
## FIXME: Change the aggregation rules to allow GROUP-BY not to include the
## non-aggregate ORDER-BY columns, so we could just GROUP-BY accounts here
## instead of having to include the sort-key. I think it should be fine if
## the first or last sort-order value gets used, because it would simplify
## the input statement.
cooked_select = parser.parse("""
SELECT account, SUM({}(position))
GROUP BY account, ACCOUNT_SORTKEY(account)
ORDER BY ACCOUNT_SORTKEY(account)
""".format(balances.summary_func or ""))
return ast.Select(cooked_select.targets,
balances.from_clause,
balances.where_clause,
cooked_select.group_by,
cooked_select.order_by,
None, None, None)
def get_target_name(target):
"""Compute the target name.
This uses the same algorithm used by SQLite. If the target has an
AS clause assigning it a name, that will be the name used. If the
target refers directly to a column, then the target name is the
column name. Otherwise use the expression text.
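
    For example, the target ``sum(position) AS total`` is named
    ``total``, the bare column ``date`` is named ``date``, and the
    expression ``1 + 2`` is named ``1 + 2``.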
"""
if target.name is not None:
return target.name
if isinstance(target.expression, ast.Column):
return target.expression.name
return target.expression.text.strip()
def get_columns_and_aggregates(node):
"""Find the columns and aggregate nodes below this tree.
All nodes under aggregate nodes are ignored.
Args:
node: An instance of EvalNode.
Returns:
A pair of (columns, aggregates), both of which are lists of EvalNode instances.
columns: The list of all columns accessed not under an aggregate node.
aggregates: The list of all aggregate nodes.
"""
columns = []
aggregates = []
_get_columns_and_aggregates(node, columns, aggregates)
return columns, aggregates
def _get_columns_and_aggregates(node, columns, aggregates):
"""Walk down a tree of nodes and fetch the column accessors and aggregates.
This function ignores all nodes under aggregate nodes.
Args:
node: An instance of EvalNode.
columns: An accumulator for columns found so far.
aggregate: An accumulator for aggregate notes found so far.
"""
if isinstance(node, EvalAggregator):
aggregates.append(node)
elif isinstance(node, EvalColumn):
columns.append(node)
else:
for child in node.childnodes():
_get_columns_and_aggregates(child, columns, aggregates)
def is_aggregate(node):
"""Return true if the node is an aggregate.
Args:
node: An instance of EvalNode.
Returns:
A boolean.
"""
# Note: We could be a tiny bit more efficient here, but it doesn't matter
# much. Performance of the query compilation matters very little overall.
_, aggregates = get_columns_and_aggregates(node)
return bool(aggregates)
def compile(context, statement, parameters=None):
return Compiler(context).compile(statement, parameters)
==> beanquery-master/beanquery/cursor.py <==
from operator import attrgetter
from typing import Sequence
from . import types
from . import parser
from . import compiler
from . import query_execute
class Column(Sequence):
__module__ = 'beanquery'
def __init__(self, name, datatype):
self._name = name
self._type = datatype
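    # The DB-API describes each entry of ``cursor.description`` as a
    # 7-item sequence: (name, type_code, display_size, internal_size,
    # precision, scale, null_ok). The Sequence implementation below
    # exposes exactly those attributes, in that order.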
_vars = tuple(attrgetter(name) for name in 'name type_code display_size internal_size precision scale null_ok'.split())
def __eq__(self, other):
if isinstance(other, type(self)):
return tuple(self) == tuple(other)
if isinstance(other, tuple):
# Used in tests.
return (self._name, self._type) == other
return NotImplemented
def __repr__(self):
return f'{self.__module__}.{self.__class__.__name__}({self._name!r}, {types.name(self._type)})'
def __len__(self):
return 7
def __getitem__(self, key):
if isinstance(key, slice):
return tuple(getter(self) for getter in self._vars(key))
return self._vars[key](self)
@property
def name(self):
return self._name
@property
def datatype(self):
# Extension to the DB-API.
return self._type
@property
def type_code(self):
# The DB-API specification is vague on this point, but other
# database connection libraries expose this as an int. It does
# not make much sense to keep a mapping between int type code
# and actual types, thus just return the hash of the type
# object.
return hash(self._type)
@property
def display_size(self):
return None
@property
def internal_size(self):
return None
@property
def precision(self):
return None
@property
def scale(self):
return None
@property
def null_ok(self):
return None
class Cursor:
def __init__(self, connection):
self._context = connection
self._description = None
self._rows = None
self._pos = 0
self.arraysize = 1
@property
def connection(self):
return self._context
def execute(self, query, params=None):
if not isinstance(query, parser.ast.Node):
query = parser.parse(query)
query = compiler.compile(self._context, query, params)
description, rows = query_execute.execute_query(query)
self._description = description
self._rows = rows
self._pos = 0
return self
def executemany(self, query, params=None):
query = parser.parse(query)
for p in params:
self.execute(query, p)
@property
def description(self):
return self._description
@property
def rowcount(self):
return len(self._rows) if self._rows is not None else -1
@property
def rownumber(self):
return self._pos
def fetchone(self):
        # This implementation pops items from the front of the result
        # rows list and is thus not efficient, especially for large
        # result sets.
if self._rows is None or not len(self._rows):
return None
self._pos += 1
return self._rows.pop(0)
def fetchmany(self, size=None):
if self._rows is None:
return []
n = size if size is not None else self.arraysize
rows = self._rows[:n]
self._rows = self._rows[n:]
self._pos += len(rows)
return rows
def fetchall(self):
if self._rows is None:
return []
rows = self._rows
self._rows = []
self._pos += len(rows)
return rows
def close(self):
# Required by the DB-API.
pass
def setinputsizes(self, sizes):
# Required by the DB-API.
pass
def setoutputsize(self, size, column=None):
# Required by the DB-API.
pass
def __iter__(self):
return iter(self._rows if self._rows is not None else [])
==> beanquery-master/beanquery/cursor_test.py <==
import unittest
import sqlite3
import beanquery
from beanquery.sources import test
class APITests:
def test_description(self):
curs = self.conn.cursor()
self.assertIsNone(curs.description)
curs.execute(f'SELECT x FROM {self.table} WHERE x = 0')
self.assertEqual([c[0] for c in curs.description], ['x'])
column = curs.description[0]
self.assertEqual(len(column), 7)
def test_cursor_not_initialized(self):
curs = self.conn.cursor()
self.assertIsNone(curs.fetchone())
self.assertEqual(curs.fetchmany(), [])
self.assertEqual(curs.fetchall(), [])
def test_cursor_fetchone(self):
curs = self.conn.cursor()
curs.execute(f'SELECT x FROM {self.table} WHERE x < 2')
row = curs.fetchone()
self.assertEqual(row, (0, ))
row = curs.fetchone()
self.assertEqual(row, (1, ))
row = curs.fetchone()
self.assertIsNone(row)
def test_cursor_fetchall(self):
curs = self.conn.cursor()
curs.execute(f'SELECT x FROM {self.table} WHERE x < 2')
rows = curs.fetchall()
self.assertEqual(rows, [(0, ), (1, )])
rows = curs.fetchall()
self.assertEqual(rows, [])
    def test_cursor_fetchmany(self):
curs = self.conn.cursor()
curs.execute(f'SELECT x FROM {self.table} WHERE x < 2')
rows = curs.fetchmany()
self.assertEqual(rows, [(0, )])
rows = curs.fetchmany()
self.assertEqual(rows, [(1, )])
rows = curs.fetchmany()
self.assertEqual(rows, [])
def test_cursor_iterator(self):
curs = self.conn.cursor()
o = object()
row = next(iter(curs), o)
self.assertIs(row, o)
curs = self.conn.cursor()
curs.execute(f'SELECT x FROM {self.table} WHERE x < 2')
iterator = iter(curs)
row = next(iterator)
self.assertEqual(row, (0, ))
row = next(iterator)
self.assertEqual(row, (1, ))
row = next(iterator, o)
self.assertIs(row, o)
class TestSQLite(APITests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.table = 'test'
cls.conn = sqlite3.connect(':memory:')
curs = cls.conn.cursor()
curs.execute('CREATE TABLE test (x int)')
curs.executemany('INSERT INTO test VALUES (?)', [(i, ) for i in range(16)])
class TestBeanquery(APITests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.table = '#test'
cls.conn = beanquery.Connection()
cls.conn.tables['test'] = test.Table(16)
beanquery-master/beanquery/errors.py000066400000000000000000000031051474576771600202520ustar00rootroot00000000000000"""
Exceptions hierarchy defined by the DB-API:

Exception
    Warning
    Error
        InterfaceError
        DatabaseError
            DataError
            OperationalError
            IntegrityError
            InternalError
            ProgrammingError
            NotSupportedError
"""
class Warning(Exception):
"""Exception raised for important warnings."""
__module__ = 'beanquery'
class Error(Exception):
"""Base exception for all errors."""
__module__ = 'beanquery'
class InterfaceError(Error):
"""An error related to the database interface rather than the database itself."""
__module__ = 'beanquery'
class DatabaseError(Error):
"""Exception raised for errors that are related to the database."""
__module__ = 'beanquery'
class DataError(DatabaseError):
"""An error caused by problems with the processed data."""
__module__ = 'beanquery'
class OperationalError(DatabaseError):
"""An error related to the database's operation."""
__module__ = 'beanquery'
class IntegrityError(DatabaseError):
"""An error caused when the relational integrity of the database is affected."""
__module__ = 'beanquery'
class InternalError(DatabaseError):
"""An error generated when the database encounters an internal error."""
__module__ = 'beanquery'
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors."""
__module__ = 'beanquery'
class NotSupportedError(DatabaseError):
"""A method or database API was used which is not supported by the database."""
__module__ = 'beanquery'
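# A minimal usage sketch, assuming these exceptions are re-exported at the
# package top level as the ``__module__`` assignments above suggest: a
# syntax error in a query surfaces as a ProgrammingError subclass, so
# callers can catch either the specific class or the generic Error base.
if __name__ == '__main__':
    import beanquery

    conn = beanquery.Connection()
    try:
        conn.cursor().execute('SELEC 1')  # misspelled keyword
    except beanquery.ProgrammingError as exc:
        print('query rejected:', exc)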
beanquery-master/beanquery/hashable.py000066400000000000000000000045021474576771600205070ustar00rootroot00000000000000import decimal
import pickle
import textwrap
from . import types
# Hashable types. Checking ``issubclass(T, types.Hashable)`` does not
# work because named tuples pass that test and beancount uses many named
# tuples that have dictionary members, making them effectively not
# hashable.
FUNDAMENTAL = frozenset({
bool,
bytes,
complex,
decimal.Decimal,
float,
int,
str,
# These are hashable only if the contained objects are hashable.
frozenset,
tuple,
})
# Functions reducing non-hashable types to something hashable, keyed by type.
REDUCERS = {}
def register(datatype, func):
"""Register reduce function for non-hashable type.
The reduce function maps a non-hashable object into a hashable
representation. This representation does not need to capture all the
object's facets, but it should return something unique enough to avoid
too many hash collisions.
"""
REDUCERS[datatype] = func
def make(columns):
"""Build an hashable tuple subclass."""
# When all columns are hashable, pass the input tuple through as is.
if all(column.datatype in FUNDAMENTAL for column in columns):
return lambda x: x
datatypes = ', '.join(types.name(column.datatype) for column in columns)
# Code generation inspired by standard library ``dataclasses.py``.
parts = []
locals = {}
for i, column in enumerate(columns):
if column.datatype in FUNDAMENTAL:
parts.append(f'self[{i}]')
elif column.datatype is dict:
parts.append(f'*self[{i}].keys(), *self[{i}].values()')
elif column.datatype is set:
parts.append(f'*self[{i}]')
else:
func = REDUCERS.get(column.datatype, pickle.dumps)
fname = f'func{i}'
locals[fname] = func
parts.append(f'{fname}(self[{i}])')
objs = ', '.join(parts)
names = ', '.join(locals.keys())
code = textwrap.dedent(f'''
def create({names}):
def __hash__(self):
return hash(({objs}))
return __hash__
''')
clsname = f'Hashable[{datatypes}]'
ns = {}
exec(code, globals(), ns)
func = ns['create'](**locals)
func.__qualname__ = f'{clsname}.{func.__name__}'
members = dict(tuple.__dict__)
members['__hash__'] = func
return type(clsname, (tuple,), members)
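# For illustration, a sketch of the ``__hash__`` source generated by make()
# for columns typed (str, dict, Foo), where Foo has a reducer registered
# under the local name ``func2`` (pickle.dumps is the fallback when no
# reducer is registered):
#
#     def __hash__(self):
#         return hash((self[0], *self[1].keys(), *self[1].values(), func2(self[2])))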
beanquery-master/beanquery/hashable_test.py000066400000000000000000000026721474576771600215540ustar00rootroot00000000000000import dataclasses
import unittest
from beanquery import hashable
from beanquery.cursor import Column
class TestHashable(unittest.TestCase):
def test_fundamental(self):
columns = (Column('b', bool), Column('i', int), Column('s', str))
wrap = hashable.make(columns)
obj = (True, 42, 'universe')
self.assertIs(wrap(obj), obj)
hash(obj)
def test_dict(self):
columns = (Column('b', bool), Column('d', dict))
wrap = hashable.make(columns)
obja = (True, {'answer': 42})
a = hash(wrap(obja))
objb = (True, {'answer': 42})
b = hash(wrap(objb))
self.assertIsNot(obja, objb)
self.assertEqual(a, b)
objc = (False, {'answer': 42})
c = hash(wrap(objc))
self.assertNotEqual(a, c)
objd = (True, {'answer': 43})
d = hash(wrap(objd))
self.assertNotEqual(a, d)
def test_registered(self):
@dataclasses.dataclass
class Foo:
xid: int
meta: dict
hashable.register(Foo, lambda obj: obj.xid)
columns = (Column('b', bool), Column('foo', Foo))
wrap = hashable.make(columns)
obja = (True, Foo(1, {'test': 1}))
a = hash(wrap(obja))
objb = (True, Foo(1, {'test': 2}))
b = hash(wrap(objb))
self.assertEqual(a, b)
objc = (True, Foo(2, {'test': 2}))
c = hash(wrap(objc))
self.assertNotEqual(a, c)
beanquery-master/beanquery/numberify.py000066400000000000000000000203241474576771600207400ustar00rootroot00000000000000"""Code to split table columns containing amounts and inventories into number columns.
For example, given a column with this content:
----- amount ------
101.23 USD
200 JPY
99.23 USD
38.34 USD, 100 JPY
We can convert this into two columns and remove the currencies:
-amount (USD)- -amount (JPY)-
        101.23
                          200
         99.23
         38.34            100
The point is that the columns should be typed as numbers, so the output can
be imported into a spreadsheet and processed further.
Notes:
* This handles the Amount, Position and Inventory datatypes. There is code to
automatically recognize columns containing such types from a table of strings
and convert such columns to their corresponding guessed data types.
* The per-currency columns are ordered in decreasing order of the number of
instances of numbers seen for each currency. So if the most numbers you have
in a column are USD, then the USD column renders first.
* Cost basis specifications are left unmodified and reported in a dedicated
extra column, like this:
----- amount ------
1 AAPL {21.23 USD}
We can convert this into two columns and remove the currencies:
-amount (AAPL)- -Cost basis-
              1 {21.23 USD}
(Eventually we might support the conversion of cost amounts as well, but they
may contain other information, such as a label or a date, so for now we don't
convert them. I'm not sure there's a good practical use case in doing that
yet.)
* We may provide some options to break out only some of the currencies into
columns, in order to handle the case where an inventory contains a large
number of currencies and we want to only operate on a restricted set of
operating currencies.
* If you provide a DisplayFormatter object to the numberification routine, it
quantizes each column according to its currency's precision. It is
recommended that you do so.
"""
__copyright__ = "Copyright (C) 2015-2017 Martin Blais"
__license__ = "GNU GPLv2"
import collections
from decimal import Decimal
from beancount.core import amount
from beancount.core import position
from beancount.core import inventory
from .cursor import Column
def numberify_results(columns, drows, dformat=None):
"""Number rows containing Amount, Position or Inventory types.
Args:
result_types: A list of items describing the names and data types of the items in
each column.
result_rows: A list of ResultRow instances.
dformat: An optional DisplayFormatter. If set, quantize the numbers by
their currency-specific precision when converting the Amount's,
Position's or Inventory'es..
Returns:
A pair of modified (result_types, result_rows) with converted datatypes.
"""
# Build an array of converters.
converters = []
for index, column in enumerate(columns):
convert_col_fun = CONVERTING_TYPES.get(column.datatype)
if convert_col_fun is None:
converters.append(IdentityConverter(column.name, column.datatype, index))
else:
col_converters = convert_col_fun(column.name, drows, index)
converters.extend(col_converters)
# Derive the output types from the expected outputs from the converters
# themselves.
otypes = tuple(Column(c.name, c.dtype) for c in converters)
# Convert the input rows by processing them through the converters.
orows = []
for drow in drows:
orow = []
for converter in converters:
orow.append(converter(drow, dformat))
orows.append(orow)
return otypes, orows
class IdentityConverter:
"""A converter that simply copies its column."""
def __init__(self, name, dtype, index):
self.name = name
self.dtype = dtype
self.index = index
def __call__(self, drow, _):
return drow[self.index]
class AmountConverter:
"""A converter that extracts the number of an amount for a specific currency."""
dtype = Decimal
def __init__(self, name, index, currency):
self.name = name
self.index = index
self.currency = currency
def __call__(self, drow, dformat):
vamount = drow[self.index]
if vamount and vamount.currency == self.currency:
number = vamount.number
if dformat:
number = dformat.quantize(number, self.currency)
else:
number = None
return number
def convert_col_Amount(name, drows, index):
"""Create converters for a column of type Amount.
Args:
name: A string, the column name.
drows: The table of objects.
index: The column number.
Returns:
A list of Converter instances, one for each of the currency types found.
"""
currency_map = collections.defaultdict(int)
for drow in drows:
vamount = drow[index]
if vamount and vamount.currency:
currency_map[vamount.currency] += 1
return [AmountConverter('{} ({})'.format(name, currency), index, currency)
for currency, _ in sorted(currency_map.items(),
key=lambda item: (item[1], item[0]),
reverse=True)]
class PositionConverter:
"""A converter that extracts the number of a position for a specific currency."""
dtype = Decimal
def __init__(self, name, index, currency):
self.name = name
self.index = index
self.currency = currency
def __call__(self, drow, dformat):
pos = drow[self.index]
if pos and pos.units.currency == self.currency:
number = pos.units.number
if dformat:
number = dformat.quantize(pos.units.number, self.currency)
else:
number = None
return number
def convert_col_Position(name, drows, index):
"""Create converters for a column of type Position.
Args:
name: A string, the column name.
drows: The table of objects.
index: The column number.
Returns:
A list of Converter instances, one for each of the currency types found.
"""
currency_map = collections.defaultdict(int)
for drow in drows:
pos = drow[index]
if pos and pos.units.currency:
currency_map[pos.units.currency] += 1
return [PositionConverter('{} ({})'.format(name, currency), index, currency)
for currency, _ in sorted(currency_map.items(),
key=lambda item: (item[1], item[0]),
reverse=True)]
class InventoryConverter:
"""A converter that extracts the number of a inventory for a specific currency.
If there are multiple lots we aggregate by currency."""
dtype = Decimal
def __init__(self, name, index, currency):
self.name = name
self.index = index
self.currency = currency
def __call__(self, drow, dformat):
inv = drow[self.index]
# FIXME: get_currency_units() returns ZERO and not None when the value
# isn't present. This should be fixed to distinguish between the two.
number = inv.get_currency_units(self.currency).number
if number and dformat:
number = dformat.quantize(number, self.currency)
return number or None
def convert_col_Inventory(name, drows, index):
"""Create converters for a column of type Inventory.
Args:
name: A string, the column name.
drows: The table of objects.
index: The column number.
Returns:
A list of Converter instances, one for each of the currency types found.
"""
currency_map = collections.defaultdict(int)
for drow in drows:
inv = drow[index]
for currency in inv.currencies():
currency_map[currency] += 1
return [InventoryConverter('{} ({})'.format(name, currency), index, currency)
for currency, _ in sorted(currency_map.items(),
key=lambda item: (item[1], item[0]),
reverse=True)]
# A mapping of data types to their converter factory.
CONVERTING_TYPES = {
amount.Amount : convert_col_Amount,
position.Position : convert_col_Position,
inventory.Inventory : convert_col_Inventory,
}
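# A minimal usage sketch, with ``A`` being beancount's amount-from-string
# helper as used in the tests:
#
#     columns = (Column('amount', amount.Amount),)
#     rows = [(A('101.23 USD'),), (A('200 JPY'),)]
#     columns, rows = numberify_results(columns, rows)
#     # columns -> (Column('amount (USD)', Decimal), Column('amount (JPY)', Decimal))
#     # rows    -> [[Decimal('101.23'), None], [None, Decimal('200')]]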
beanquery-master/beanquery/numberify_test.py000066400000000000000000000131231474576771600217760ustar00rootroot00000000000000__copyright__ = "Copyright (C) 2015-2017 Martin Blais"
__license__ = "GNU GPLv2"
import datetime
import unittest
from decimal import Decimal
from beancount.core.number import D
from beancount.core.amount import A
from beancount.core import amount
from beancount.core import position
from beancount.core import inventory
from beancount.core import display_context
from beanquery import numberify
from beanquery.cursor import Column
class TestNumerifySimple(unittest.TestCase):
input_amounts = ["24.17 CAD",
"-77.02 CAD",
"11.39 CAD",
"800.00 USD",
"41.17 CAD",
"950.00 USD",
"110 JPY",
"-947.00 USD"]
expected_types = (('pos (CAD)', Decimal),
('pos (USD)', Decimal),
('pos (JPY)', Decimal))
expected_rows = [[D('24.17'), None, None],
[D('-77.02'), None, None],
[D('11.39'), None, None],
[None, D('800.00'), None],
[D('41.17'), None, None],
[None, D('950.00'), None],
[None, None, D('110')],
[None, D('-947.00'), None]]
def test_amount(self):
itypes = (Column('pos', amount.Amount), )
irows = [(A(string),) for string in self.input_amounts]
atypes, arows = numberify.numberify_results(itypes, irows)
self.assertEqual(self.expected_types, atypes)
self.assertEqual(self.expected_rows, arows)
def test_position(self):
itypes = (Column('pos', position.Position), )
irows = [(position.from_string(string),) for string in self.input_amounts]
atypes, arows = numberify.numberify_results(itypes, irows)
self.assertEqual(self.expected_types, atypes)
self.assertEqual(self.expected_rows, arows)
def test_inventory(self):
itypes = (Column('pos', inventory.Inventory), )
irows = [(inventory.from_string(string),) for string in self.input_amounts]
atypes, arows = numberify.numberify_results(itypes, irows)
self.assertEqual(self.expected_types, atypes)
self.assertEqual(self.expected_rows, arows)
class TestNumerifyIdentity(unittest.TestCase):
def test_identity(self):
itypes = (Column('date', datetime.date), Column('name', str), Column('count', int), )
irows = [[datetime.date(2015, 9, 8), 'Testing', 3]]
atypes, arows = numberify.numberify_results(itypes, irows)
self.assertEqual(itypes, atypes)
self.assertEqual(irows, arows)
class TestNumerifyInventory(unittest.TestCase):
def test_inventory(self):
itypes = (Column('balance', inventory.Inventory), )
irows = [[inventory.from_string('10 HOOL {23.00 USD}')],
[inventory.from_string('2.11 USD, 3.44 CAD')],
[inventory.from_string('-2 HOOL {24.00 USD}, 5.66 CAD')]]
atypes, arows = numberify.numberify_results(itypes, irows)
self.assertEqual((('balance (HOOL)', Decimal),
('balance (CAD)', Decimal),
('balance (USD)', Decimal), ), atypes)
self.assertEqual([[D('10'), None, None],
[None, D('3.44'), D('2.11')],
[D('-2'), D('5.66'), None]], arows)
class TestNumerifyPrecision(unittest.TestCase):
def test_precision(self):
# Some display context.
dcontext = display_context.DisplayContext()
dcontext.update(D('111'), 'JPY')
dcontext.update(D('1.111'), 'RGAGX')
dcontext.update(D('1.11'), 'USD')
dformat = dcontext.build()
# Input data.
itypes = (Column('number', Decimal),
Column('amount', amount.Amount),
Column('position', position.Position),
Column('inventory', inventory.Inventory))
irows = [[D(amt.split()[0]),
A(amt),
position.from_string(amt),
inventory.from_string(amt)]
for amt in ['123.45678909876 JPY',
'1.67321232123 RGAGX',
'5.67345434543 USD']]
# First check with no explicit quantization.
atypes, arows = numberify.numberify_results(itypes, irows)
erows = [[D('123.45678909876'),
None, None, D('123.45678909876'),
None, None, D('123.45678909876'),
None, None, D('123.45678909876')],
[D('1.67321232123'),
None, D('1.67321232123'), None,
None, D('1.67321232123'), None,
None, D('1.67321232123'), None],
[D('5.67345434543'),
D('5.67345434543'), None, None,
D('5.67345434543'), None, None,
D('5.67345434543'), None, None]]
self.assertEqual(erows, arows)
# Then compare with quantization.
atypes, arows = numberify.numberify_results(itypes, irows, dformat)
erows = [[D('123.45678909876'),
None, None, D('123'),
None, None, D('123'),
None, None, D('123')],
[D('1.67321232123'),
None, D('1.673'), None, None,
D('1.673'), None, None,
D('1.673'), None],
[D('5.67345434543'),
D('5.67'), None, None,
D('5.67'), None, None,
D('5.67'), None, None]]
self.assertEqual(erows, arows)
if __name__ == '__main__':
unittest.main()
beanquery-master/beanquery/parser/000077500000000000000000000000001474576771600176615ustar00rootroot00000000000000beanquery-master/beanquery/parser/__init__.py000066400000000000000000000030211474576771600217660ustar00rootroot00000000000000import datetime
import decimal
import tatsu
from ..errors import ProgrammingError
from .parser import BQLParser
from . import ast
class BQLSemantics:
def set_context(self, ctx):
self._ctx = ctx
def null(self, value):
return None
def integer(self, value):
return int(value)
def decimal(self, value):
return decimal.Decimal(value)
def date(self, value):
return datetime.date.fromisoformat(value)
def string(self, value):
return value[1:-1]
def boolean(self, value):
return value == 'TRUE'
def identifier(self, value):
return value.lower()
def asterisk(self, value):
return ast.Asterisk()
def list(self, value):
return list(value)
def ordering(self, value):
return ast.Ordering[value or 'ASC']
def _default(self, value, typename=None):
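# Typed grammar rules (e.g. ``add::Add::BinaryOp`` in bql.ebnf) arrive here
# with the AST node class name in ``typename``: the parse tree mapping is
# converted into the corresponding ast dataclass, with any trailing
# underscore stripped from the keyword names.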
if typename is not None:
func = getattr(ast, typename)
return func(**{name.rstrip('_'): value for name, value in value.items()})
return value
class ParseError(ProgrammingError):
def __init__(self, parseinfo):
super().__init__('syntax error')
self.parseinfo = parseinfo
def parse(text):
try:
return BQLParser().parse(text, semantics=BQLSemantics())
except tatsu.exceptions.ParseError as exc:
line = exc.tokenizer.line_info(exc.pos).line
parseinfo = tatsu.infos.ParseInfo(exc.tokenizer, exc.item, exc.pos, exc.pos + 1, line, [])
raise ParseError(parseinfo) from exc
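# A minimal usage sketch: parse() returns the root ast.Node of the query,
# while a syntax error surfaces as ParseError (a ProgrammingError):
#
#     tree = parse('SELECT date, account WHERE account ~ "Assets"')
#     assert isinstance(tree, ast.Select)
#     print(tree.tosexp())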
beanquery-master/beanquery/parser/ast.py000066400000000000000000000172261474576771600210320ustar00rootroot00000000000000from __future__ import annotations
import dataclasses
import datetime
import enum
import sys
import textwrap
import typing
if typing.TYPE_CHECKING:
from typing import Any, Optional, Union
def _indent(text):
return textwrap.indent(text, ' ')
def _fields(node):
for field in dataclasses.fields(node):
if field.repr:
yield field.name, getattr(node, field.name)
def tosexp(node):
if isinstance(node, Node):
return f'({node.__class__.__name__.lower()}\n' + _indent(
'\n'.join(f'{name.replace("_", "-")}: {tosexp(value)}'
for name, value in _fields(node) if value is not None) + ')')
if isinstance(node, list):
return '(\n' + _indent('\n'.join(tosexp(i) for i in node)) + ')'
if isinstance(node, enum.Enum):
return node.name.lower()
return repr(node)
def walk(node):
if isinstance(node, Node):
for name, child in _fields(node):
yield from walk(child)
yield node
if isinstance(node, list):
for child in node:
yield from walk(child)
class Node:
"""Base class for BQL AST nodes."""
__slots__ = ()
parseinfo = None
@property
def text(self):
if not self.parseinfo:
return None
text = self.parseinfo.tokenizer.text
return text[self.parseinfo.pos:self.parseinfo.endpos]
def tosexp(self):
return tosexp(self)
def walk(self):
return walk(self)
def node(name, fields):
"""Manufacture an AST node class."""
return dataclasses.make_dataclass(
name,
[*fields.split(), ('parseinfo', None, dataclasses.field(default=None, compare=False, repr=False))],
bases=(Node,),
**({'slots': True} if sys.version_info[:2] >= (3, 10) else {}))
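# For illustration, ``node('Target', 'expression name')`` below is roughly
# equivalent to hand-writing:
#
#     @dataclasses.dataclass
#     class Target(Node):
#         expression: typing.Any
#         name: typing.Any
#         parseinfo: typing.Any = dataclasses.field(default=None, compare=False, repr=False)
#
# with ``slots=True`` added on Python 3.10 and later.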
# A 'select' query action.
#
# Attributes:
# targets: Either a single 'Asterisk' instance or a list of 'Target'
# instances.
# from_clause: An instance of 'From', or None if absent.
# where_clause: A root expression node, or None if absent.
# group_by: An instance of 'GroupBy', or None if absent.
# order_by: An instance of 'OrderBy', or None if absent.
# pivot_by: An instance of 'PivotBy', or None if absent.
# limit: An integer, or None if absent.
# distinct: A boolean value (True), or None if absent.
Select = node('Select', 'targets from_clause where_clause group_by order_by pivot_by limit distinct')
# A select query that produces final balances for accounts.
# This is equivalent to
#
# SELECT account, sum(position)
# FROM ...
# WHERE ...
# GROUP BY account
#
# Attributes:
# summary_func: A method on an inventory to call on the position column.
# For example, to extract units, value at cost, etc.
# from_clause: An instance of 'From', or None if absent.
Balances = node('Balances', 'summary_func from_clause where_clause')
# A select query that produces a journal of postings.
# This is equivalent to
#
# SELECT date, flag, payee, narration, ... FROM <from_clause>
# WHERE account = <account>
#
# Attributes:
# account: A string, the name of the account to restrict to.
# summary_func: A method on an inventory to call on the position column.
# For example, to extract units, value at cost, etc.
# from_clause: An instance of 'From', or None if absent.
Journal = node('Journal', 'account summary_func from_clause')
# A query that will simply print the selected entries in Beancount format.
#
# Attributes:
# from_clause: An instance of 'From', or None if absent.
Print = node('Print', 'from_clause')
# A parsed SELECT column or target.
#
# Attributes:
# expression: A tree of expression nodes from the parser.
# name: A string, the given name of the target (given by "AS <name>").
Target = node('Target', 'expression name')
# A placeholder in SELECT * or COUNT(*) constructs.
Asterisk = node('Asterisk', '')
# A FROM clause.
#
# Attributes:
# expression: A tree of expression nodes from the parser.
# close: A CLOSE clause, either None if absent, a boolean if the clause
# was present but no date was provided, or a datetime.date instance if
# a date was provided.
@dataclasses.dataclass(**({'slots': True} if sys.version_info[:2] >= (3, 10) else {}))
class From(Node):
expression: Optional[Node] = None
open: Optional[datetime.date] = None
close: Optional[Union[datetime.date, bool]] = None
clear: Optional[bool] = None
parseinfo: Any = dataclasses.field(default=None, compare=False, repr=False)
# A GROUP BY clause.
#
# Attributes:
# columns: A list of group-by expressions, simple Column() or otherwise.
# having: An expression tree for the optional HAVING clause, or None.
GroupBy = node('GroupBy', 'columns having')
# An ORDER BY clause.
#
# Attributes:
# column: order-by expression, simple Column() or otherwise.
# ordering: The sort order as an Ordering enum value.
OrderBy = node('OrderBy', 'column ordering')
class Ordering(enum.IntEnum):
# The enum values are chosen in this way to be able to use them
# directly as the reverse parameter to the list sort() method.
ASC = 0
DESC = 1
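# This lets query execution sort result rows directly with, for
# example, ``rows.sort(key=sortkey, reverse=ordering)``.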
def __repr__(self):
return f"{self.__class__.__name__}.{self.name}"
# A PIVOT BY clause.
#
# Attributes:
# columns: A list of pivot-by expressions, simple Column() or otherwise.
PivotBy = node('PivotBy', 'columns')
# A reference to a table.
#
# Attributes:
# name: The table name.
Table = node('Table', 'name')
# A reference to a column.
#
# Attributes:
# name: A string, the name of the column to access.
Column = node('Column', 'name')
# A function call.
#
# Attributes:
# fname: A string, the name of the function.
# operands: A list of other expressions, the arguments of the function to
# evaluate. This is possibly an empty list.
Function = node('Function', 'fname operands')
Attribute = node('Attribute', 'operand name')
Subscript = node('Subscript', 'operand key')
# A constant node.
#
# Attributes:
# value: The constant value this represents.
Constant = node('Constant', 'value')
# A query parameter placeholder.
#
# Attributes:
# name: The placeholder name.
Placeholder = node('Placeholder', 'name')
# Base class for unary operators.
#
# Attributes:
# operand: An expression, the operand of the operator.
UnaryOp = node('UnaryOp', 'operand')
# Base class for binary operators.
#
# Attributes:
# left: An expression, the left operand.
# right: An expression, the right operand.
BinaryOp = node('BinaryOp', 'left right')
# Base class for boolean operators.
BoolOp = node('BoolOp', 'args')
# Between
Between = node('Between', 'operand lower upper')
# Negation operator.
class Not(UnaryOp):
__slots__ = ()
class IsNull(UnaryOp):
__slots__ = ()
class IsNotNull(UnaryOp):
__slots__ = ()
# Boolean operators.
class And(BoolOp):
__slots__ = ()
class Or(BoolOp):
__slots__ = ()
# Equality and inequality comparison operators.
class Equal(BinaryOp):
__slots__ = ()
class NotEqual(BinaryOp):
__slots__ = ()
class Greater(BinaryOp):
__slots__ = ()
class GreaterEq(BinaryOp):
__slots__ = ()
class Less(BinaryOp):
__slots__ = ()
class LessEq(BinaryOp):
__slots__ = ()
# Regular expression match operator.
class Match(BinaryOp):
__slots__ = ()
class NotMatch(BinaryOp):
__slots__ = ()
class Matches(BinaryOp):
__slots__ = ()
# Membership operators.
class In(BinaryOp):
__slots__ = ()
class NotIn(BinaryOp):
__slots__ = ()
# Arithmetic operators.
class Neg(UnaryOp):
__slots__ = ()
class Mul(BinaryOp):
__slots__ = ()
class Div(BinaryOp):
__slots__ = ()
class Mod(BinaryOp):
__slots__ = ()
class Add(BinaryOp):
__slots__ = ()
class Sub(BinaryOp):
__slots__ = ()
Any = node('Any', 'left op right')
All = node('All', 'left op right')
beanquery-master/beanquery/parser/bql.ebnf000066400000000000000000000125131474576771600212750ustar00rootroot00000000000000@@grammar :: BQL
@@parseinfo :: True
@@ignorecase :: True
@@keyword :: 'AND' 'AS' 'ASC' 'BY' 'DESC' 'DISTINCT' 'FALSE' 'FROM'
'GROUP' 'HAVING' 'IN' 'IS' 'LIMIT' 'NOT' 'OR' 'ORDER' 'PIVOT'
'SELECT' 'TRUE' 'WHERE'
@@keyword :: 'BALANCES' 'JOURNAL' 'PRINT'
@@comments :: /(\/\*([^*]|[\r\n]|(\*+([^*\/]|[\r\n])))*\*+\/)/
@@eol_comments :: /\;[^\n]*?$/
bql
= @:statement [';'] $
;
statement
=
| select
| balances
| journal
| print
;
select::Select
= 'SELECT' ['DISTINCT' distinct:`True`] targets:(','.{ target }+ | asterisk)
['FROM' from_clause:(table | subselect | from)]
['WHERE' where_clause:expression]
['GROUP' 'BY' group_by:groupby]
['ORDER' 'BY' order_by:','.{order}+]
['PIVOT' 'BY' pivot_by:pivotby]
['LIMIT' limit:integer]
;
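# For example, the select rule above accepts a query such as:
#
#   SELECT DISTINCT date, account, sum(position) AS total
#   FROM year = 2024
#   GROUP BY date, account
#   ORDER BY date DESC
#   LIMIT 10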
subselect
= '(' @:select ')'
;
from::From
=
| 'OPEN' ~ 'ON' open:date ['CLOSE' ('ON' close:date | {} close:`True`)] ['CLEAR' clear:`True`]
| 'CLOSE' ~ ('ON' close:date | {} close:`True`) ['CLEAR' clear:`True`]
| 'CLEAR' ~ clear:`True`
| expression:expression ['OPEN' 'ON' open:date] ['CLOSE' ('ON' close:date | {} close:`True`)] ['CLEAR' clear:`True`]
;
table::Table
= name:/#([a-zA-Z_][a-zA-Z0-9_]*)?/
;
groupby::GroupBy
= columns:','.{ (integer | expression) }+ ['HAVING' having:expression]
;
order::OrderBy
= column:(integer | expression) ordering:ordering
;
ordering
= ['DESC' | 'ASC']
;
pivotby::PivotBy
= columns+:(integer | column) ',' columns+:(integer | column)
;
target::Target
= expression:expression ['AS' name:identifier]
;
expression
=
| disjunction
| conjunction
;
disjunction
=
| or
| conjunction
;
or::Or::BoolOp
= args+:conjunction { 'OR' args+:conjunction }+
;
conjunction
=
| and
| inversion
;
and::And::BoolOp
= args+:inversion { 'AND' args+:inversion }+
;
inversion
=
| not
| comparison
;
not::Not::UnaryOp
= 'NOT' operand:inversion
;
comparison
=
| any
| all
| lt
| lte
| gt
| gte
| eq
| neq
| in
| notin
| match
| notmatch
| matches
| isnull
| isnotnull
| between
| sum
;
any::Any
= left:sum op:op 'any' '(' right:expression ')'
;
all::All
= left:sum op:op 'all' '(' right:expression ')'
;
op
=
| '<'
| '<='
| '>'
| '>='
| '='
| '!='
| '~'
| '!~'
| '?~'
;
lt::Less::BinaryOp
= left:sum '<' right:sum
;
lte::LessEq::BinaryOp
= left:sum '<=' right:sum
;
gt::Greater::BinaryOp
= left:sum '>' right:sum
;
gte::GreaterEq::BinaryOp
= left:sum '>=' right:sum
;
eq::Equal::BinaryOp
= left:sum '=' right:sum
;
neq::NotEqual::BinaryOp
= left:sum '!=' right:sum
;
in::In::BinaryOp
= left:sum 'IN' right:sum
;
notin::NotIn::BinaryOp
= left:sum 'NOT' 'IN' right:sum
;
match::Match::BinaryOp
= left:sum '~' right:sum
;
notmatch::NotMatch::BinaryOp
= left:sum '!~' right:sum
;
matches::Matches::BinaryOp
= left:sum '?~' right:sum
;
isnull::IsNull::UnaryOp
= operand:sum 'IS' 'NULL'
;
isnotnull::IsNotNull::UnaryOp
= operand:sum 'IS' 'NOT' 'NULL'
;
between::Between
= operand:sum 'BETWEEN' lower:sum 'AND' upper:sum
;
sum
=
| add
| sub
| term
;
add::Add::BinaryOp
= left:sum '+' ~ right:term
;
sub::Sub::BinaryOp
= left:sum '-' ~ right:term
;
term
=
| mul
| div
| mod
| factor
;
mul::Mul::BinaryOp
= left:term '*' ~ right:factor
;
div::Div::BinaryOp
= left:term '/' ~ right:factor
;
mod::Mod::BinaryOp
= left:term '%' ~ right:factor
;
factor
=
| unary
| '(' @:expression ')'
;
unary
=
| uplus
| uminus
| primary
;
uplus
= '+' @:atom
;
uminus::Neg::UnaryOp
= '-' operand:factor
;
primary
=
| attribute
| subscript
| atom
;
attribute::Attribute
= operand:primary '.' name:identifier
;
subscript::Subscript
= operand:primary '[' key:string ']'
;
atom
=
| select
| function
| constant
| column
| placeholder
;
placeholder::Placeholder
=
| '%s' name:``
| '%(' name:identifier ')s'
;
function::Function
=
| fname:identifier '(' operands:','.{ expression } ')'
| fname:identifier '(' operands+:asterisk ')'
;
column::Column
= name:identifier
;
literal
=
| date
| decimal
| integer
| string
| null
| boolean
;
constant::Constant
= value:(literal | list)
;
list
= '(' &( literal ',') @:','.{ (literal | ()) }+ ')'
;
@name
identifier
= /[a-zA-Z_][a-zA-Z0-9_]*/
;
asterisk
= '*'
;
string
= /(\"[^\"]*\"|\'[^\']*\')/
;
boolean
= 'TRUE' | 'FALSE'
;
null
= 'NULL'
;
integer
= /\d+/
;
decimal
= /([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)/
;
date
= /(\d{4}-\d{2}-\d{2})/
;
balances::Balances
= 'BALANCES'
['AT' summary_func:identifier]
['FROM' from_clause:from]
['WHERE' where_clause:expression]
;
journal::Journal
= 'JOURNAL'
[account:string]
['AT' summary_func:identifier]
['FROM' from_clause:from]
;
print::Print
= 'PRINT'
['FROM' from_clause:from]
;
beanquery-master/beanquery/parser/parser.py000066400000000000000000001035451474576771600215370ustar00rootroot00000000000000#!/usr/bin/env python3
# WARNING: CAVEAT UTILITOR
#
# This file was automatically generated by TatSu.
#
# https://pypi.python.org/pypi/tatsu/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
# ruff: noqa: C405, COM812, I001, F401, PLR1702, PLC2801, SIM117
from __future__ import annotations
import sys
from pathlib import Path
from tatsu.buffering import Buffer
from tatsu.parsing import Parser
from tatsu.parsing import tatsumasu
from tatsu.parsing import leftrec, nomemo, isname
from tatsu.infos import ParserConfig
from tatsu.util import re, generic_main
KEYWORDS: set[str] = {
'AND',
'AS',
'ASC',
'BY',
'DESC',
'DISTINCT',
'FALSE',
'FROM',
'GROUP',
'HAVING',
'IN',
'IS',
'LIMIT',
'NOT',
'OR',
'ORDER',
'PIVOT',
'SELECT',
'TRUE',
'WHERE',
'BALANCES',
'JOURNAL',
'PRINT',
}
class BQLBuffer(Buffer):
def __init__(self, text, /, config: ParserConfig | None = None, **settings):
config = ParserConfig.new(
config,
owner=self,
whitespace=None,
nameguard=None,
ignorecase=True,
namechars='',
parseinfo=True,
comments='(\\/\\*([^*]|[\\r\\n]|(\\*+([^*\\/]|[\\r\\n])))*\\*+\\/)',
eol_comments='\\;[^\\n]*?$',
keywords=KEYWORDS,
start='bql',
)
config = config.replace(**settings)
super().__init__(text, config=config)
class BQLParser(Parser):
def __init__(self, /, config: ParserConfig | None = None, **settings):
config = ParserConfig.new(
config,
owner=self,
whitespace=None,
nameguard=None,
ignorecase=True,
namechars='',
parseinfo=True,
comments='(\\/\\*([^*]|[\\r\\n]|(\\*+([^*\\/]|[\\r\\n])))*\\*+\\/)',
eol_comments='\\;[^\\n]*?$',
keywords=KEYWORDS,
start='bql',
)
config = config.replace(**settings)
super().__init__(config=config)
@tatsumasu()
def _bql_(self):
self._statement_()
self.name_last_node('@')
with self._optional():
self._token(';')
self._check_eof()
@tatsumasu()
def _statement_(self):
with self._choice():
with self._option():
self._select_()
with self._option():
self._balances_()
with self._option():
self._journal_()
with self._option():
self._print_()
self._error(
'expecting one of: '
"'BALANCES' 'JOURNAL' 'PRINT' 'SELECT'"
''
)
@tatsumasu('Attribute')
@nomemo
def _attribute_(self):
self._primary_()
self.name_last_node('operand')
self._token('.')
self._identifier_()
self.name_last_node('name')
self._define(['name', 'operand'], [])
@tatsumasu('Subscript')
@nomemo
def _subscript_(self):
self._primary_()
self.name_last_node('operand')
self._token('[')
self._string_()
self.name_last_node('key')
self._token(']')
self._define(['key', 'operand'], [])
@tatsumasu()
def _atom_(self):
with self._choice():
with self._option():
self._select_()
with self._option():
self._function_()
with self._option():
self._constant_()
with self._option():
self._column_()
with self._option():
self._placeholder_()
self._error(
'expecting one of: '
"'%(' '%s' 'SELECT' "
''
''
'